Merge ath-next from ath.git.

Major changes in ath10k:

* add support for qca99x0 family of devices
* improve performance of tx_lock
* add support for raw mode (802.11 frame format) and software crypto engine
  enabled via a module parameter

wil6210:

* implement TSO support
* support bootloader v1 and onwards
commit 94e92a7bff
@ -31,16 +31,19 @@
|
||||
#include "wmi-ops.h"
|
||||
|
||||
unsigned int ath10k_debug_mask;
|
||||
static unsigned int ath10k_cryptmode_param;
|
||||
static bool uart_print;
|
||||
static bool skip_otp;
|
||||
|
||||
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
|
||||
module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
|
||||
module_param(uart_print, bool, 0644);
|
||||
module_param(skip_otp, bool, 0644);
|
||||
|
||||
MODULE_PARM_DESC(debug_mask, "Debugging mask");
|
||||
MODULE_PARM_DESC(uart_print, "Uart target debugging");
|
||||
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
|
||||
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
|
||||
|
||||
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
{
|
||||
@ -1073,6 +1076,46 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
|
||||
switch (ath10k_cryptmode_param) {
|
||||
case ATH10K_CRYPT_MODE_HW:
|
||||
clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
|
||||
clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
|
||||
break;
|
||||
case ATH10K_CRYPT_MODE_SW:
|
||||
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
|
||||
ar->fw_features)) {
|
||||
ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
|
||||
set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
|
||||
break;
|
||||
default:
|
||||
ath10k_info(ar, "invalid cryptmode: %d\n",
|
||||
ath10k_cryptmode_param);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
|
||||
ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
|
||||
|
||||
if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
|
||||
ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
|
||||
|
||||
/* Workaround:
|
||||
*
|
||||
* Firmware A-MSDU aggregation breaks with RAW Tx encap mode
|
||||
* and causes enormous performance issues (malformed frames,
|
||||
* etc).
|
||||
*
|
||||
* Disabling A-MSDU makes RAW mode stable with heavy traffic
|
||||
* albeit a bit slower compared to regular operation.
|
||||
*/
|
||||
ar->htt.max_num_amsdu = 1;
|
||||
}
|
||||
|
||||
/* Backwards compatibility for firmwares without
|
||||
* ATH10K_FW_IE_WMI_OP_VERSION.
|
||||
*/
|
||||
@ -1606,6 +1649,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
|
||||
if (!ar->workqueue)
|
||||
goto err_free_mac;
|
||||
|
||||
ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
|
||||
if (!ar->workqueue_aux)
|
||||
goto err_free_wq;
|
||||
|
||||
mutex_init(&ar->conf_mutex);
|
||||
spin_lock_init(&ar->data_lock);
|
||||
|
||||
@ -1626,10 +1673,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
|
||||
|
||||
ret = ath10k_debug_create(ar);
|
||||
if (ret)
|
||||
goto err_free_wq;
|
||||
goto err_free_aux_wq;
|
||||
|
||||
return ar;
|
||||
|
||||
err_free_aux_wq:
|
||||
destroy_workqueue(ar->workqueue_aux);
|
||||
err_free_wq:
|
||||
destroy_workqueue(ar->workqueue);
|
||||
|
||||
@ -1645,6 +1694,9 @@ void ath10k_core_destroy(struct ath10k *ar)
|
||||
flush_workqueue(ar->workqueue);
|
||||
destroy_workqueue(ar->workqueue);
|
||||
|
||||
flush_workqueue(ar->workqueue_aux);
|
||||
destroy_workqueue(ar->workqueue_aux);
|
||||
|
||||
ath10k_debug_destroy(ar);
|
||||
ath10k_mac_destroy(ar);
|
||||
}
|
||||
|
@ -92,6 +92,7 @@ struct ath10k_skb_cb {
|
||||
u8 tid;
|
||||
u16 freq;
|
||||
bool is_offchan;
|
||||
bool nohwcrypt;
|
||||
struct ath10k_htt_txbuf *txbuf;
|
||||
u32 txbuf_paddr;
|
||||
} __packed htt;
|
||||
@ -152,6 +153,7 @@ struct ath10k_wmi {
|
||||
const struct wmi_ops *ops;
|
||||
|
||||
u32 num_mem_chunks;
|
||||
u32 rx_decap_mode;
|
||||
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
|
||||
};
|
||||
|
||||
@ -341,6 +343,7 @@ struct ath10k_vif {
|
||||
} u;
|
||||
|
||||
bool use_cts_prot;
|
||||
bool nohwcrypt;
|
||||
int num_legacy_stations;
|
||||
int txpower;
|
||||
struct wmi_wmm_params_all_arg wmm_params;
|
||||
@ -382,9 +385,6 @@ struct ath10k_debug {
|
||||
u32 reg_addr;
|
||||
u32 nf_cal_period;
|
||||
|
||||
u8 htt_max_amsdu;
|
||||
u8 htt_max_ampdu;
|
||||
|
||||
struct ath10k_fw_crash_data *fw_crash_data;
|
||||
};
|
||||
|
||||
@ -453,16 +453,21 @@ enum ath10k_fw_features {
|
||||
ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
|
||||
|
||||
/* Don't trust error code from otp.bin */
|
||||
ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
|
||||
ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
|
||||
|
||||
/* Some firmware revisions pad 4th hw address to 4 byte boundary making
|
||||
* it 8 bytes long in Native Wifi Rx decap.
|
||||
*/
|
||||
ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
|
||||
ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
|
||||
|
||||
/* Firmware supports bypassing PLL setting on init. */
|
||||
ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
|
||||
|
||||
/* Raw mode support. If supported, FW supports receiving and transmitting
|
||||
* frames in raw mode.
|
||||
*/
|
||||
ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
|
||||
|
||||
/* keep last */
|
||||
ATH10K_FW_FEATURE_COUNT,
|
||||
};
|
||||
@ -476,6 +481,15 @@ enum ath10k_dev_flags {
|
||||
* waiters should immediately cancel instead of waiting for a time out.
|
||||
*/
|
||||
ATH10K_FLAG_CRASH_FLUSH,
|
||||
|
||||
/* Use Raw mode instead of native WiFi Tx/Rx encap mode.
|
||||
* Raw mode supports both hardware and software crypto. Native WiFi only
|
||||
* supports hardware crypto.
|
||||
*/
|
||||
ATH10K_FLAG_RAW_MODE,
|
||||
|
||||
/* Disable HW crypto engine */
|
||||
ATH10K_FLAG_HW_CRYPTO_DISABLED,
|
||||
};
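
For context: both new flags live in the ar->dev_flags bitmap, so any code path can query them with the standard bitops helpers. A minimal sketch (not part of the patch; the helper name is invented) of the kind of check the rest of the driver performs, assuming ar points at a valid struct ath10k:

static inline bool ath10k_example_sw_crypto_active(struct ath10k *ar)
{
	/* raw 802.11 framing selected and the hw crypto engine bypassed,
	 * i.e. the state set up by the cryptmode=1 module parameter
	 */
	return test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) &&
	       test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
}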
|
||||
|
||||
enum ath10k_cal_mode {
|
||||
@ -484,6 +498,13 @@ enum ath10k_cal_mode {
|
||||
ATH10K_CAL_MODE_DT,
|
||||
};
|
||||
|
||||
enum ath10k_crypt_mode {
|
||||
/* Only use hardware crypto engine */
|
||||
ATH10K_CRYPT_MODE_HW,
|
||||
/* Only use software crypto engine */
|
||||
ATH10K_CRYPT_MODE_SW,
|
||||
};
|
||||
|
||||
static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
|
||||
{
|
||||
switch (mode) {
|
||||
@ -673,6 +694,8 @@ struct ath10k {
|
||||
struct completion vdev_setup_done;
|
||||
|
||||
struct workqueue_struct *workqueue;
|
||||
/* Auxiliary workqueue */
|
||||
struct workqueue_struct *workqueue_aux;
|
||||
|
||||
/* prevents concurrent FW reconfiguration */
|
||||
struct mutex conf_mutex;
|
||||
@ -695,6 +718,9 @@ struct ath10k {
|
||||
int num_active_peers;
|
||||
int num_tids;
|
||||
|
||||
struct work_struct svc_rdy_work;
|
||||
struct sk_buff *svc_rdy_skb;
|
||||
|
||||
struct work_struct offchan_tx_work;
|
||||
struct sk_buff_head offchan_tx_queue;
|
||||
struct completion offchan_tx_completed;
|
||||
|
@ -124,11 +124,11 @@ EXPORT_SYMBOL(ath10k_info);
|
||||
|
||||
void ath10k_print_driver_info(struct ath10k *ar)
|
||||
{
|
||||
char fw_features[128];
|
||||
char fw_features[128] = {};
|
||||
|
||||
ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
|
||||
|
||||
ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d features %s\n",
|
||||
ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
|
||||
ar->hw_params.name,
|
||||
ar->target_version,
|
||||
ar->chip_id,
|
||||
@ -144,6 +144,8 @@ void ath10k_print_driver_info(struct ath10k *ar)
|
||||
ar->htt.op_version,
|
||||
ath10k_cal_mode_str(ar->cal_mode),
|
||||
ar->max_num_stations,
|
||||
test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
|
||||
!test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
|
||||
fw_features);
|
||||
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
|
||||
config_enabled(CONFIG_ATH10K_DEBUG),
|
||||
@ -1363,12 +1365,8 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
if (ar->debug.htt_max_amsdu)
|
||||
amsdu = ar->debug.htt_max_amsdu;
|
||||
|
||||
if (ar->debug.htt_max_ampdu)
|
||||
ampdu = ar->debug.htt_max_ampdu;
|
||||
|
||||
amsdu = ar->htt.max_num_amsdu;
|
||||
ampdu = ar->htt.max_num_ampdu;
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
|
||||
len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
|
||||
@ -1402,8 +1400,8 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
|
||||
goto out;
|
||||
|
||||
res = count;
|
||||
ar->debug.htt_max_amsdu = amsdu;
|
||||
ar->debug.htt_max_ampdu = ampdu;
|
||||
ar->htt.max_num_amsdu = amsdu;
|
||||
ar->htt.max_num_ampdu = ampdu;
|
||||
|
||||
out:
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
@ -1905,9 +1903,6 @@ void ath10k_debug_stop(struct ath10k *ar)
|
||||
if (ar->debug.htt_stats_mask != 0)
|
||||
cancel_delayed_work(&ar->debug.htt_stats_dwork);
|
||||
|
||||
ar->debug.htt_max_amsdu = 0;
|
||||
ar->debug.htt_max_ampdu = 0;
|
||||
|
||||
ath10k_wmi_pdev_pktlog_disable(ar);
|
||||
}
|
||||
|
||||
|
@ -246,12 +246,31 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
|
||||
}
|
||||
|
||||
status = ath10k_htt_verify_version(htt);
|
||||
if (status)
|
||||
if (status) {
|
||||
ath10k_warn(ar, "failed to verify htt version: %d\n",
|
||||
status);
|
||||
return status;
|
||||
}
|
||||
|
||||
status = ath10k_htt_send_frag_desc_bank_cfg(htt);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
return ath10k_htt_send_rx_ring_cfg_ll(htt);
|
||||
status = ath10k_htt_send_rx_ring_cfg_ll(htt);
|
||||
if (status) {
|
||||
ath10k_warn(ar, "failed to setup rx ring: %d\n",
|
||||
status);
|
||||
return status;
|
||||
}
|
||||
|
||||
status = ath10k_htt_h2t_aggr_cfg_msg(htt,
|
||||
htt->max_num_ampdu,
|
||||
htt->max_num_amsdu);
|
||||
if (status) {
|
||||
ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
|
||||
status);
|
||||
return status;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -83,15 +83,39 @@ struct htt_ver_req {
|
||||
* around the mask + shift defs.
|
||||
*/
|
||||
struct htt_data_tx_desc_frag {
|
||||
__le32 paddr;
|
||||
__le32 len;
|
||||
union {
|
||||
struct double_word_addr {
|
||||
__le32 paddr;
|
||||
__le32 len;
|
||||
} __packed dword_addr;
|
||||
struct triple_word_addr {
|
||||
__le32 paddr_lo;
|
||||
__le16 paddr_hi;
|
||||
__le16 len_16;
|
||||
} __packed tword_addr;
|
||||
} __packed;
|
||||
} __packed;
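
The fragment descriptor now wraps two layouts in a union: the original two-word form (32-bit address plus 32-bit length) and a three-word form that splits a wider DMA address into low/high parts for the qca99x0 continuous fragment descriptors. A rough sketch of filling one entry either way (illustrative only; the helper is invented, and the in-tree code further down simply writes paddr_hi = 0 on 32-bit targets):

static void example_fill_tx_frag(struct htt_data_tx_desc_frag *frag,
				 dma_addr_t paddr, u16 len, bool use_tword)
{
	if (use_tword) {
		/* layout used with the per-MSDU extension descriptors */
		frag->tword_addr.paddr_lo = __cpu_to_le32(lower_32_bits(paddr));
		frag->tword_addr.paddr_hi = __cpu_to_le16(upper_32_bits(paddr));
		frag->tword_addr.len_16 = __cpu_to_le16(len);
	} else {
		/* legacy layout kept for the in-band txbuf fragment list */
		frag->dword_addr.paddr = __cpu_to_le32(paddr);
		frag->dword_addr.len = __cpu_to_le32(len);
	}
}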
|
||||
|
||||
struct htt_msdu_ext_desc {
|
||||
__le32 tso_flag[4];
|
||||
__le32 tso_flag[3];
|
||||
__le16 ip_identification;
|
||||
u8 flags;
|
||||
u8 reserved;
|
||||
struct htt_data_tx_desc_frag frags[6];
|
||||
};
|
||||
|
||||
#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
|
||||
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
|
||||
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
|
||||
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
|
||||
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)
|
||||
|
||||
#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
|
||||
| HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
|
||||
| HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
|
||||
| HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
|
||||
| HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
|
||||
|
||||
enum htt_data_tx_desc_flags0 {
|
||||
HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
|
||||
HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
|
||||
@ -260,6 +284,9 @@ struct htt_aggr_conf {
|
||||
} __packed;
|
||||
|
||||
#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
|
||||
struct htt_mgmt_tx_desc_qca99x0 {
|
||||
__le32 rate;
|
||||
} __packed;
|
||||
|
||||
struct htt_mgmt_tx_desc {
|
||||
u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
|
||||
@ -268,6 +295,9 @@ struct htt_mgmt_tx_desc {
|
||||
__le32 len;
|
||||
__le32 vdev_id;
|
||||
u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
|
||||
union {
|
||||
struct htt_mgmt_tx_desc_qca99x0 qca99x0;
|
||||
} __packed;
|
||||
} __packed;
|
||||
|
||||
enum htt_mgmt_tx_status {
|
||||
@ -1366,6 +1396,8 @@ struct ath10k_htt {
|
||||
u8 target_version_minor;
|
||||
struct completion target_version_received;
|
||||
enum ath10k_fw_htt_op_version op_version;
|
||||
u8 max_num_amsdu;
|
||||
u8 max_num_ampdu;
|
||||
|
||||
const enum htt_t2h_msg_type *t2h_msg_types;
|
||||
u32 t2h_msg_types_max;
|
||||
@ -1528,6 +1560,12 @@ struct htt_rx_desc {
|
||||
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
|
||||
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
|
||||
|
||||
/* These values are default in most firmware revisions and apparently are a
|
||||
* sweet spot performance wise.
|
||||
*/
|
||||
#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
|
||||
#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
|
||||
|
||||
int ath10k_htt_connect(struct ath10k_htt *htt);
|
||||
int ath10k_htt_init(struct ath10k *ar);
|
||||
int ath10k_htt_setup(struct ath10k_htt *htt);
|
||||
|
@ -368,7 +368,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
|
||||
msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
|
||||
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
|
||||
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
|
||||
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
|
||||
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
|
||||
RX_MSDU_START_INFO0_MSDU_LENGTH);
|
||||
msdu_chained = rx_desc->frag_info.ring2_more_count;
|
||||
|
||||
@ -394,7 +394,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
|
||||
msdu_chaining = 1;
|
||||
}
|
||||
|
||||
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
|
||||
last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
|
||||
RX_MSDU_END_INFO0_LAST_MSDU;
|
||||
|
||||
trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
|
||||
@ -740,7 +740,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
|
||||
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
|
||||
return NULL;
|
||||
|
||||
if (!(rxd->msdu_end.info0 &
|
||||
if (!(rxd->msdu_end.common.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
|
||||
return NULL;
|
||||
|
||||
@ -991,9 +991,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
|
||||
bool is_last;
|
||||
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
is_first = !!(rxd->msdu_end.info0 &
|
||||
is_first = !!(rxd->msdu_end.common.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
|
||||
is_last = !!(rxd->msdu_end.info0 &
|
||||
is_last = !!(rxd->msdu_end.common.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
|
||||
|
||||
/* Delivered decapped frame:
|
||||
@ -1017,9 +1017,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
|
||||
skb_trim(msdu, msdu->len - FCS_LEN);
|
||||
|
||||
/* In most cases this will be true for sniffed frames. It makes sense
|
||||
* to deliver them as-is without stripping the crypto param. This would
|
||||
* also make sense for software based decryption (which is not
|
||||
* implemented in ath10k).
|
||||
* to deliver them as-is without stripping the crypto param. This is
|
||||
* necessary for software based decryption.
|
||||
*
|
||||
* If there's no error then the frame is decrypted. At least that is
|
||||
* the case for frames that come in via fragmented rx indication.
|
||||
@ -1104,9 +1103,9 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
hdr = (void *)rxd->rx_hdr_status;
|
||||
|
||||
is_first = !!(rxd->msdu_end.info0 &
|
||||
is_first = !!(rxd->msdu_end.common.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
|
||||
is_last = !!(rxd->msdu_end.info0 &
|
||||
is_last = !!(rxd->msdu_end.common.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
|
||||
is_amsdu = !(is_first && is_last);
|
||||
|
||||
@ -1214,7 +1213,7 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
|
||||
*/
|
||||
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
|
||||
decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
|
||||
RX_MSDU_START_INFO1_DECAP_FORMAT);
|
||||
|
||||
switch (decap) {
|
||||
@ -1244,7 +1243,7 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
|
||||
|
||||
rxd = (void *)skb->data - sizeof(*rxd);
|
||||
flags = __le32_to_cpu(rxd->attention.flags);
|
||||
info = __le32_to_cpu(rxd->msdu_start.info1);
|
||||
info = __le32_to_cpu(rxd->msdu_start.common.info1);
|
||||
|
||||
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
|
||||
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
|
||||
@ -1437,7 +1436,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
|
||||
|
||||
first = skb_peek(amsdu);
|
||||
rxd = (void *)first->data - sizeof(*rxd);
|
||||
decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
|
||||
decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
|
||||
RX_MSDU_START_INFO1_DECAP_FORMAT);
|
||||
|
||||
if (!chained)
|
||||
@ -1631,8 +1630,6 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
|
||||
__le16 msdu_id;
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&htt->tx_lock);
|
||||
|
||||
switch (status) {
|
||||
case HTT_DATA_TX_STATUS_NO_ACK:
|
||||
tx_done.no_ack = true;
|
||||
@ -1757,14 +1754,14 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
|
||||
__skb_queue_tail(amsdu, msdu);
|
||||
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
if (rxd->msdu_end.info0 &
|
||||
if (rxd->msdu_end.common.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
|
||||
break;
|
||||
}
|
||||
|
||||
msdu = skb_peek_tail(amsdu);
|
||||
rxd = (void *)msdu->data - sizeof(*rxd);
|
||||
if (!(rxd->msdu_end.info0 &
|
||||
if (!(rxd->msdu_end.common.info0 &
|
||||
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
|
||||
skb_queue_splice_init(amsdu, list);
|
||||
return -EAGAIN;
|
||||
@ -1998,15 +1995,11 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
break;
|
||||
}
|
||||
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
ath10k_txrx_tx_unref(htt, &tx_done);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
break;
|
||||
}
|
||||
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
__skb_queue_tail(&htt->tx_compl_q, skb);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
skb_queue_tail(&htt->tx_compl_q, skb);
|
||||
tasklet_schedule(&htt->txrx_compl_task);
|
||||
return;
|
||||
case HTT_T2H_MSG_TYPE_SEC_IND: {
|
||||
@ -2072,6 +2065,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
|
||||
break;
|
||||
case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
|
||||
break;
|
||||
case HTT_T2H_MSG_TYPE_AGGR_CONF:
|
||||
break;
|
||||
case HTT_T2H_MSG_TYPE_EN_STATS:
|
||||
case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
|
||||
case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
|
||||
@ -2095,12 +2090,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
|
||||
struct htt_resp *resp;
|
||||
struct sk_buff *skb;
|
||||
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
|
||||
while ((skb = skb_dequeue(&htt->tx_compl_q))) {
|
||||
ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
|
||||
spin_lock_bh(&htt->rx_ring.lock);
|
||||
while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
|
||||
|
@ -63,7 +63,8 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
|
||||
|
||||
lockdep_assert_held(&htt->tx_lock);
|
||||
|
||||
ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
|
||||
ret = idr_alloc(&htt->pending_tx, skb, 0,
|
||||
htt->max_num_pending_tx, GFP_ATOMIC);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
|
||||
|
||||
@ -133,9 +134,7 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
|
||||
tx_done.discard = 1;
|
||||
tx_done.msdu_id = msdu_id;
|
||||
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
ath10k_txrx_tx_unref(htt, &tx_done);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -259,6 +258,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
|
||||
cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
|
||||
cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
|
||||
__cpu_to_le32(htt->frag_desc.paddr);
|
||||
cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
|
||||
cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
|
||||
__cpu_to_le16(htt->max_num_pending_tx - 1);
|
||||
|
||||
@ -427,12 +427,11 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
if (res < 0) {
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
goto err_tx_dec;
|
||||
}
|
||||
msdu_id = res;
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
|
||||
txdesc = ath10k_htc_alloc_skb(ar, len);
|
||||
if (!txdesc) {
|
||||
@ -448,6 +447,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
|
||||
skb_put(txdesc, len);
|
||||
cmd = (struct htt_cmd *)txdesc->data;
|
||||
memset(cmd, 0, len);
|
||||
|
||||
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
|
||||
cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
|
||||
cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
|
||||
@ -494,6 +495,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
u16 msdu_id, flags1 = 0;
|
||||
dma_addr_t paddr = 0;
|
||||
u32 frags_paddr = 0;
|
||||
struct htt_msdu_ext_desc *ext_desc = NULL;
|
||||
|
||||
res = ath10k_htt_tx_inc_pending(htt);
|
||||
if (res)
|
||||
@ -501,12 +503,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
if (res < 0) {
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
goto err_tx_dec;
|
||||
}
|
||||
msdu_id = res;
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
|
||||
prefetch_len = min(htt->prefetch_len, msdu->len);
|
||||
prefetch_len = roundup(prefetch_len, 4);
|
||||
@ -522,8 +523,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
if ((ieee80211_is_action(hdr->frame_control) ||
|
||||
ieee80211_is_deauth(hdr->frame_control) ||
|
||||
ieee80211_is_disassoc(hdr->frame_control)) &&
|
||||
ieee80211_has_protected(hdr->frame_control))
|
||||
ieee80211_has_protected(hdr->frame_control)) {
|
||||
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
|
||||
} else if (!skb_cb->htt.nohwcrypt &&
|
||||
skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
|
||||
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
|
||||
}
|
||||
|
||||
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
|
||||
DMA_TO_DEVICE);
|
||||
@ -537,16 +542,30 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
|
||||
/* pass through */
|
||||
case ATH10K_HW_TXRX_ETHERNET:
|
||||
frags = skb_cb->htt.txbuf->frags;
|
||||
if (ar->hw_params.continuous_frag_desc) {
|
||||
memset(&htt->frag_desc.vaddr[msdu_id], 0,
|
||||
sizeof(struct htt_msdu_ext_desc));
|
||||
frags = (struct htt_data_tx_desc_frag *)
|
||||
&htt->frag_desc.vaddr[msdu_id].frags;
|
||||
ext_desc = &htt->frag_desc.vaddr[msdu_id];
|
||||
frags[0].tword_addr.paddr_lo =
|
||||
__cpu_to_le32(skb_cb->paddr);
|
||||
frags[0].tword_addr.paddr_hi = 0;
|
||||
frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
|
||||
|
||||
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
|
||||
frags[0].len = __cpu_to_le32(msdu->len);
|
||||
frags[1].paddr = 0;
|
||||
frags[1].len = 0;
|
||||
frags_paddr = htt->frag_desc.paddr +
|
||||
(sizeof(struct htt_msdu_ext_desc) * msdu_id);
|
||||
} else {
|
||||
frags = skb_cb->htt.txbuf->frags;
|
||||
frags[0].dword_addr.paddr =
|
||||
__cpu_to_le32(skb_cb->paddr);
|
||||
frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
|
||||
frags[1].dword_addr.paddr = 0;
|
||||
frags[1].dword_addr.len = 0;
|
||||
|
||||
frags_paddr = skb_cb->htt.txbuf_paddr;
|
||||
}
|
||||
flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
|
||||
|
||||
frags_paddr = skb_cb->htt.txbuf_paddr;
|
||||
break;
|
||||
case ATH10K_HW_TXRX_MGMT:
|
||||
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
|
||||
@ -580,14 +599,20 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
|
||||
prefetch_len);
|
||||
skb_cb->htt.txbuf->htc_hdr.flags = 0;
|
||||
|
||||
if (skb_cb->htt.nohwcrypt)
|
||||
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
|
||||
|
||||
if (!skb_cb->is_protected)
|
||||
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
|
||||
|
||||
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
|
||||
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
|
||||
if (msdu->ip_summed == CHECKSUM_PARTIAL) {
|
||||
if (msdu->ip_summed == CHECKSUM_PARTIAL &&
|
||||
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
|
||||
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
|
||||
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
|
||||
if (ar->hw_params.continuous_frag_desc)
|
||||
ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
|
||||
}
|
||||
|
||||
/* Prevent firmware from sending up tx inspection requests. There's
|
||||
|
@ -217,14 +217,16 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
|
||||
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
|
||||
|
||||
/* Known peculiarities:
 * - current FW doesn't support raw rx mode (last tested v599)
 * - current FW dumps upon raw tx mode (last tested v599)
 * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
 * - raw frames have FCS, nwifi frames don't
 * - ethernet frames have the 802.11 header decapped and parts (base hdr, cipher
 *   param, llc/snap) are aligned to 4-byte boundaries each */
|
||||
enum ath10k_hw_txrx_mode {
|
||||
ATH10K_HW_TXRX_RAW = 0,
|
||||
|
||||
/* Native Wifi decap mode is used to align IP frames to 4-byte
|
||||
* boundaries and avoid a very expensive re-alignment in mac80211.
|
||||
*/
|
||||
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
|
||||
ATH10K_HW_TXRX_ETHERNET = 2,
|
||||
|
||||
@ -286,10 +288,6 @@ enum ath10k_hw_rate_cck {
|
||||
#define TARGET_RX_TIMEOUT_LO_PRI 100
|
||||
#define TARGET_RX_TIMEOUT_HI_PRI 40
|
||||
|
||||
/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
|
||||
* avoid a very expensive re-alignment in mac80211. */
|
||||
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
|
||||
|
||||
#define TARGET_SCAN_MAX_PENDING_REQS 4
|
||||
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
|
||||
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
|
||||
@ -324,7 +322,6 @@ enum ath10k_hw_rate_cck {
|
||||
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
|
||||
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
|
||||
#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
|
||||
#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
|
||||
#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
|
||||
#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
|
||||
#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
|
||||
@ -363,10 +360,7 @@ enum ath10k_hw_rate_cck {
|
||||
(TARGET_10_4_NUM_VDEVS))
|
||||
#define TARGET_10_4_ACTIVE_PEERS 0
|
||||
|
||||
/* TODO: increase qcache max client limit to 512 after
|
||||
* testing with 512 client.
|
||||
*/
|
||||
#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 256
|
||||
#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
|
||||
#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
|
||||
#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
|
||||
#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
|
||||
|
@ -197,6 +197,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
|
||||
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
|
||||
}
|
||||
|
||||
if (cmd == DISABLE_KEY) {
|
||||
arg.key_cipher = WMI_CIPHER_NONE;
|
||||
arg.key_data = NULL;
|
||||
@ -218,6 +222,9 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
|
||||
|
||||
reinit_completion(&ar->install_key_done);
|
||||
|
||||
if (arvif->nohwcrypt)
|
||||
return 1;
|
||||
|
||||
ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -256,7 +263,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
|
||||
|
||||
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
|
||||
addr, flags);
|
||||
if (ret)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
flags = 0;
|
||||
@ -264,7 +271,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
|
||||
|
||||
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
|
||||
addr, flags);
|
||||
if (ret)
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
@ -322,10 +329,10 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
|
||||
/* key flags are not required to delete the key */
|
||||
ret = ath10k_install_key(arvif, peer->keys[i],
|
||||
DISABLE_KEY, addr, flags);
|
||||
if (ret && first_errno == 0)
|
||||
if (ret < 0 && first_errno == 0)
|
||||
first_errno = ret;
|
||||
|
||||
if (ret)
|
||||
if (ret < 0)
|
||||
ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
|
||||
i, ret);
|
||||
|
||||
@ -398,7 +405,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
|
||||
break;
|
||||
/* key flags are not required to delete the key */
|
||||
ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
|
||||
if (ret && first_errno == 0)
|
||||
if (ret < 0 && first_errno == 0)
|
||||
first_errno = ret;
|
||||
|
||||
if (ret)
|
||||
@ -591,11 +598,19 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
|
||||
static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
|
||||
enum wmi_peer_type peer_type)
|
||||
{
|
||||
struct ath10k_vif *arvif;
|
||||
int num_peers = 0;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ar->conf_mutex);
|
||||
|
||||
if (ar->num_peers >= ar->max_num_peers)
|
||||
num_peers = ar->num_peers;
|
||||
|
||||
/* Each vdev consumes a peer entry as well */
|
||||
list_for_each_entry(arvif, &ar->arvifs, list)
|
||||
num_peers++;
|
||||
|
||||
if (num_peers >= ar->max_num_peers)
|
||||
return -ENOBUFS;
|
||||
|
||||
ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
|
||||
@ -671,20 +686,6 @@ static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
|
||||
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
|
||||
}
|
||||
|
||||
static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
|
||||
{
|
||||
struct ath10k *ar = arvif->ar;
|
||||
u32 vdev_param;
|
||||
|
||||
if (value != 0xFFFFFFFF)
|
||||
value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
|
||||
ATH10K_FRAGMT_THRESHOLD_MIN,
|
||||
ATH10K_FRAGMT_THRESHOLD_MAX);
|
||||
|
||||
vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
|
||||
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
|
||||
}
|
||||
|
||||
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
|
||||
{
|
||||
int ret;
|
||||
@ -836,7 +837,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
|
||||
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
|
||||
{
|
||||
struct cfg80211_chan_def *chandef = NULL;
|
||||
struct ieee80211_channel *channel = chandef->chan;
|
||||
struct ieee80211_channel *channel = NULL;
|
||||
struct wmi_vdev_start_request_arg arg = {};
|
||||
int ret = 0;
|
||||
|
||||
@ -2502,6 +2503,9 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
|
||||
u32 param;
|
||||
u32 value;
|
||||
|
||||
if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
|
||||
return 0;
|
||||
|
||||
if (!(ar->vht_cap_info &
|
||||
(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
|
||||
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
|
||||
@ -3149,13 +3153,30 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
|
||||
* Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
|
||||
* NativeWifi txmode - it selects AP key instead of peer key. It seems
|
||||
* to work with Ethernet txmode so use it.
|
||||
*
|
||||
* FIXME: Check if raw mode works with TDLS.
|
||||
*/
|
||||
if (ieee80211_is_data_present(fc) && sta && sta->tdls)
|
||||
return ATH10K_HW_TXRX_ETHERNET;
|
||||
|
||||
if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
|
||||
return ATH10K_HW_TXRX_RAW;
|
||||
|
||||
return ATH10K_HW_TXRX_NATIVE_WIFI;
|
||||
}
|
||||
|
||||
static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
|
||||
struct sk_buff *skb)
{
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
|
||||
IEEE80211_TX_CTL_INJECTED;
|
||||
if ((info->flags & mask) == mask)
|
||||
return false;
|
||||
if (vif)
|
||||
return !ath10k_vif_to_arvif(vif)->nohwcrypt;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
|
||||
* Control in the header.
|
||||
*/
|
||||
@ -3322,6 +3343,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
|
||||
int vdev_id;
|
||||
int ret;
|
||||
unsigned long time_left;
|
||||
bool tmp_peer_created = false;
|
||||
|
||||
/* FW requirement: We must create a peer before FW will send out
|
||||
* an offchannel frame. Otherwise the frame will be stuck and
|
||||
@ -3359,6 +3381,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
|
||||
if (ret)
|
||||
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
|
||||
peer_addr, vdev_id, ret);
|
||||
tmp_peer_created = (ret == 0);
|
||||
}
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
@ -3374,7 +3397,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
|
||||
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
|
||||
skb);
|
||||
|
||||
if (!peer) {
|
||||
if (!peer && tmp_peer_created) {
|
||||
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
|
||||
if (ret)
|
||||
ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
|
||||
@ -3600,6 +3623,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
|
||||
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
|
||||
ATH10K_SKB_CB(skb)->htt.freq = 0;
|
||||
ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
|
||||
ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
|
||||
ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
|
||||
ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
|
||||
ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
|
||||
@ -3615,12 +3639,11 @@ static void ath10k_tx(struct ieee80211_hw *hw,
|
||||
ath10k_tx_h_8023(skb);
|
||||
break;
|
||||
case ATH10K_HW_TXRX_RAW:
|
||||
/* FIXME: Packet injection isn't implemented. It should be
|
||||
* doable with firmware 10.2 on qca988x.
|
||||
*/
|
||||
WARN_ON_ONCE(1);
|
||||
ieee80211_free_txskb(hw, skb);
|
||||
return;
|
||||
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
|
||||
WARN_ON_ONCE(1);
|
||||
ieee80211_free_txskb(hw, skb);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
|
||||
@ -4019,6 +4042,43 @@ static u32 get_nss_from_chainmask(u16 chain_mask)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
|
||||
{
|
||||
u32 value = 0;
|
||||
struct ath10k *ar = arvif->ar;
|
||||
|
||||
if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
|
||||
return 0;
|
||||
|
||||
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
|
||||
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
|
||||
value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET);
|
||||
|
||||
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
|
||||
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
|
||||
value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET);
|
||||
|
||||
if (!value)
|
||||
return 0;
|
||||
|
||||
if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
|
||||
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
|
||||
|
||||
if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
|
||||
value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
|
||||
WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
|
||||
|
||||
if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
|
||||
value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
|
||||
|
||||
if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
|
||||
value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
|
||||
WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
|
||||
|
||||
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
|
||||
ar->wmi.vdev_param->txbf, value);
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO:
|
||||
* Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
|
||||
@ -4060,6 +4120,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
||||
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
|
||||
}
|
||||
|
||||
if (ar->num_peers >= ar->max_num_peers) {
|
||||
ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
|
||||
return -ENOBUFS;
|
||||
}
|
||||
|
||||
if (ar->free_vdev_map == 0) {
|
||||
ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
|
||||
ret = -EBUSY;
|
||||
@ -4139,6 +4204,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
|
||||
arvif->nohwcrypt = true;
|
||||
|
||||
if (arvif->nohwcrypt &&
|
||||
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
|
||||
ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
|
||||
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
|
||||
@ -4237,16 +4310,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
||||
}
|
||||
}
|
||||
|
||||
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
|
||||
ret = ath10k_mac_set_txbf_conf(arvif);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
|
||||
ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_peer_delete;
|
||||
}
|
||||
|
||||
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
|
||||
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
|
||||
ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_peer_delete;
|
||||
}
|
||||
@ -4728,6 +4801,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
|
||||
return 1;
|
||||
|
||||
if (arvif->nohwcrypt)
|
||||
return 1;
|
||||
|
||||
if (key->keyidx > WMI_MAX_KEY_INDEX)
|
||||
return -ENOSPC;
|
||||
|
||||
@ -4797,6 +4873,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
|
||||
ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
|
||||
if (ret) {
|
||||
WARN_ON(ret > 0);
|
||||
ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
|
||||
arvif->vdev_id, peer_addr, ret);
|
||||
goto exit;
|
||||
@ -4812,13 +4889,16 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
|
||||
ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
|
||||
if (ret) {
|
||||
WARN_ON(ret > 0);
|
||||
ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
|
||||
arvif->vdev_id, peer_addr, ret);
|
||||
ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
|
||||
peer_addr, flags);
|
||||
if (ret2)
|
||||
if (ret2) {
|
||||
WARN_ON(ret2 > 0);
|
||||
ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
|
||||
arvif->vdev_id, peer_addr, ret2);
|
||||
}
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
@ -5545,6 +5625,21 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
|
||||
{
|
||||
/* Even though there's a WMI enum for fragmentation threshold no known
|
||||
* firmware actually implements it. Moreover it is not possible to rely
|
||||
* frame fragmentation to mac80211 because firmware clears the "more
|
||||
* fragments" bit in frame control making it impossible for remote
|
||||
* devices to reassemble frames.
|
||||
*
|
||||
* Hence implement a dummy callback just to say fragmentation isn't
|
||||
* supported. This effectively prevents mac80211 from doing frame
|
||||
* fragmentation in software.
|
||||
*/
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
u32 queues, bool drop)
|
||||
{
|
||||
@ -6387,6 +6482,7 @@ static const struct ieee80211_ops ath10k_ops = {
|
||||
.remain_on_channel = ath10k_remain_on_channel,
|
||||
.cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
|
||||
.set_rts_threshold = ath10k_set_rts_threshold,
|
||||
.set_frag_threshold = ath10k_mac_op_set_frag_threshold,
|
||||
.flush = ath10k_flush,
|
||||
.tx_last_beacon = ath10k_tx_last_beacon,
|
||||
.set_antenna = ath10k_set_antenna,
|
||||
@ -6892,7 +6988,6 @@ int ath10k_mac_register(struct ath10k *ar)
|
||||
ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
|
||||
ieee80211_hw_set(ar->hw, AP_LINK_PS);
|
||||
ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
|
||||
ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
|
||||
ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
|
||||
ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
|
||||
ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
|
||||
@ -6900,6 +6995,9 @@ int ath10k_mac_register(struct ath10k *ar)
|
||||
ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
|
||||
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
|
||||
|
||||
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
|
||||
ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
|
||||
|
||||
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
|
||||
ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
|
||||
|
||||
@ -7003,7 +7101,8 @@ int ath10k_mac_register(struct ath10k *ar)
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
ar->hw->netdev_features = NETIF_F_HW_CSUM;
|
||||
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
|
||||
ar->hw->netdev_features = NETIF_F_HW_CSUM;
|
||||
|
||||
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
|
||||
/* Init ath dfs pattern detector */
|
||||
|
@ -64,6 +64,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
|
||||
static const struct pci_device_id ath10k_pci_id_table[] = {
|
||||
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
|
||||
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
|
||||
{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
|
||||
{0}
|
||||
};
|
||||
|
||||
@ -78,6 +79,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
|
||||
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
|
||||
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
|
||||
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
|
||||
{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
|
||||
};
|
||||
|
||||
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
|
||||
@ -2761,7 +2763,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
|
||||
|
||||
static int ath10k_pci_cold_reset(struct ath10k *ar)
|
||||
{
|
||||
int i;
|
||||
u32 val;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
|
||||
@ -2777,23 +2778,18 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
|
||||
val |= 1;
|
||||
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
|
||||
|
||||
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
|
||||
if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
|
||||
RTC_STATE_COLD_RESET_MASK)
|
||||
break;
|
||||
msleep(1);
|
||||
}
|
||||
/* After writing into SOC_GLOBAL_RESET to put the device into
 * reset and then pulling it out of reset, PCIe may not be stable
 * for a while; an immediate PCIe register access can cause a bus
 * error. Add a delay before any PCIe access to avoid this.
 */
|
||||
msleep(20);
|
||||
|
||||
/* Pull Target, including PCIe, out of RESET. */
|
||||
val &= ~1;
|
||||
ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
|
||||
|
||||
for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
|
||||
if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
|
||||
RTC_STATE_COLD_RESET_MASK))
|
||||
break;
|
||||
msleep(1);
|
||||
}
|
||||
msleep(20);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
|
||||
|
||||
|
@ -422,6 +422,12 @@ struct rx_mpdu_end {
|
||||
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
|
||||
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
|
||||
|
||||
#define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff
|
||||
#define RX_MSDU_START_INFO2_DA_IDX_LSB 0
|
||||
#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
|
||||
#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB 16
|
||||
#define RX_MSDU_START_INFO2_DA_BCAST_MCAST BIT(11)
|
||||
|
||||
/* The decapped header (rx_hdr_status) contains the following:
|
||||
* a) 802.11 header
|
||||
* [padding to 4 bytes]
|
||||
@ -449,12 +455,23 @@ enum rx_msdu_decap_format {
|
||||
RX_MSDU_DECAP_8023_SNAP_LLC = 3
|
||||
};
|
||||
|
||||
struct rx_msdu_start {
|
||||
struct rx_msdu_start_common {
|
||||
__le32 info0; /* %RX_MSDU_START_INFO0_ */
|
||||
__le32 flow_id_crc;
|
||||
__le32 info1; /* %RX_MSDU_START_INFO1_ */
|
||||
} __packed;
|
||||
|
||||
struct rx_msdu_start_qca99x0 {
|
||||
__le32 info2; /* %RX_MSDU_START_INFO2_ */
|
||||
} __packed;
|
||||
|
||||
struct rx_msdu_start {
|
||||
struct rx_msdu_start_common common;
|
||||
union {
|
||||
struct rx_msdu_start_qca99x0 qca99x0;
|
||||
} __packed;
|
||||
} __packed;
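
With the descriptor split into a common part plus a chip-specific union, existing code keeps reading the shared words through .common, while qca99x0-only fields sit behind the union. A hypothetical accessor (not from the patch; assumes rxd is the struct htt_rx_desc from htt.h and that the hardware actually fills info2):

static inline u32 example_rx_da_idx(struct htt_rx_desc *rxd)
{
	/* destination-address index, present only in the qca99x0 layout */
	return MS(__le32_to_cpu(rxd->msdu_start.qca99x0.info2),
		  RX_MSDU_START_INFO2_DA_IDX);
}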
|
||||
|
||||
/*
|
||||
* msdu_length
|
||||
* MSDU length in bytes after decapsulation. This field is
|
||||
@ -540,7 +557,7 @@ struct rx_msdu_start {
|
||||
#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30)
|
||||
#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31)
|
||||
|
||||
struct rx_msdu_end {
|
||||
struct rx_msdu_end_common {
|
||||
__le16 ip_hdr_cksum;
|
||||
__le16 tcp_hdr_cksum;
|
||||
u8 key_id_octet;
|
||||
@ -549,6 +566,36 @@ struct rx_msdu_end {
|
||||
__le32 info0;
|
||||
} __packed;
|
||||
|
||||
#define RX_MSDU_END_INFO1_TCP_FLAG_MASK 0x000001ff
|
||||
#define RX_MSDU_END_INFO1_TCP_FLAG_LSB 0
|
||||
#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK 0x00001c00
|
||||
#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB 10
|
||||
#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK 0xffff0000
|
||||
#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB 16
|
||||
#define RX_MSDU_END_INFO1_IRO_ELIGIBLE BIT(9)
|
||||
|
||||
#define RX_MSDU_END_INFO2_DA_OFFSET_MASK 0x0000003f
|
||||
#define RX_MSDU_END_INFO2_DA_OFFSET_LSB 0
|
||||
#define RX_MSDU_END_INFO2_SA_OFFSET_MASK 0x00000fc0
|
||||
#define RX_MSDU_END_INFO2_SA_OFFSET_LSB 6
|
||||
#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK 0x0003f000
|
||||
#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB 12
|
||||
|
||||
struct rx_msdu_end_qca99x0 {
|
||||
__le32 ipv6_crc;
|
||||
__le32 tcp_seq_no;
|
||||
__le32 tcp_ack_no;
|
||||
__le32 info1;
|
||||
__le32 info2;
|
||||
} __packed;
|
||||
|
||||
struct rx_msdu_end {
|
||||
struct rx_msdu_end_common common;
|
||||
union {
|
||||
struct rx_msdu_end_qca99x0 qca99x0;
|
||||
} __packed;
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
*ip_hdr_chksum
|
||||
* This can include the IP header checksum or the pseudo header
|
||||
@ -870,7 +917,11 @@ struct rx_ppdu_start {
|
||||
#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
|
||||
#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
|
||||
|
||||
#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
|
||||
#define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc
|
||||
#define RX_PPDU_END_INFO1_PEER_IDX_LSB 2
|
||||
#define RX_PPDU_END_INFO1_BB_DATA BIT(0)
|
||||
#define RX_PPDU_END_INFO1_PEER_IDX_VALID BIT(1)
|
||||
#define RX_PPDU_END_INFO1_PPDU_DONE BIT(15)
|
||||
|
||||
struct rx_ppdu_end_common {
|
||||
__le32 evm_p0;
|
||||
@ -891,13 +942,13 @@ struct rx_ppdu_end_common {
|
||||
__le32 evm_p15;
|
||||
__le32 tsf_timestamp;
|
||||
__le32 wb_timestamp;
|
||||
} __packed;
|
||||
|
||||
struct rx_ppdu_end_qca988x {
|
||||
u8 locationing_timestamp;
|
||||
u8 phy_err_code;
|
||||
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
|
||||
__le32 info0; /* %RX_PPDU_END_INFO0_ */
|
||||
} __packed;
|
||||
|
||||
struct rx_ppdu_end_qca988x {
|
||||
__le16 bb_length;
|
||||
__le16 info1; /* %RX_PPDU_END_INFO1_ */
|
||||
} __packed;
|
||||
@ -909,16 +960,126 @@ struct rx_ppdu_end_qca988x {
|
||||
#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
|
||||
|
||||
struct rx_ppdu_end_qca6174 {
|
||||
u8 locationing_timestamp;
|
||||
u8 phy_err_code;
|
||||
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
|
||||
__le32 info0; /* %RX_PPDU_END_INFO0_ */
|
||||
__le32 rtt; /* %RX_PPDU_END_RTT_ */
|
||||
__le16 bb_length;
|
||||
__le16 info1; /* %RX_PPDU_END_INFO1_ */
|
||||
} __packed;
|
||||
|
||||
#define RX_PKT_END_INFO0_RX_SUCCESS BIT(0)
|
||||
#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX BIT(3)
|
||||
#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP BIT(4)
|
||||
#define RX_PKT_END_INFO0_ERR_OFDM_RESTART BIT(5)
|
||||
#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP BIT(6)
|
||||
#define RX_PKT_END_INFO0_ERR_CCK_RESTART BIT(7)
|
||||
|
||||
#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK 0x0001ffff
|
||||
#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB 0
|
||||
#define RX_LOCATION_INFO_FAC_STATUS_MASK 0x000c0000
|
||||
#define RX_LOCATION_INFO_FAC_STATUS_LSB 18
|
||||
#define RX_LOCATION_INFO_PKT_BW_MASK 0x00700000
|
||||
#define RX_LOCATION_INFO_PKT_BW_LSB 20
|
||||
#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
|
||||
#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB 23
|
||||
#define RX_LOCATION_INFO_CIR_STATUS BIT(17)
|
||||
#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE BIT(25)
|
||||
#define RX_LOCATION_INFO_RTT_TX_DATA_START_X BIT(26)
|
||||
#define RX_LOCATION_INFO_HW_IFFT_MODE BIT(30)
|
||||
#define RX_LOCATION_INFO_RX_LOCATION_VALID BIT(31)
|
||||
|
||||
struct rx_pkt_end {
|
||||
__le32 info0; /* %RX_PKT_END_INFO0_ */
|
||||
__le32 phy_timestamp_1;
|
||||
__le32 phy_timestamp_2;
|
||||
__le32 rx_location_info; /* %RX_LOCATION_INFO_ */
|
||||
} __packed;
|
||||
|
||||
enum rx_phy_ppdu_end_info0 {
|
||||
RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_RX_NAP = BIT(4),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING = BIT(5),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY = BIT(6),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE = BIT(7),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH = BIT(8),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART = BIT(9),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE = BIT(10),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER = BIT(12),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING = BIT(13),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC = BIT(14),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE = BIT(15),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH = BIT(16),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART = BIT(17),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE = BIT(18),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP = BIT(19),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_HT_CRC = BIT(20),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH = BIT(21),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_HT_RATE = BIT(22),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF = BIT(23),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD = BIT(25),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN = BIT(26),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW = BIT(27),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC = BIT(29),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA = BIT(30),
|
||||
RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG = BIT(31),
|
||||
};
|
||||
|
||||
enum rx_phy_ppdu_end_info1 {
|
||||
RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP = BIT(0),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM = BIT(1),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM = BIT(2),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0 = BIT(3),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63 = BIT(5),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER = BIT(6),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP = BIT(7),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT = BIT(8),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK = BIT(9),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION = BIT(10),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK = BIT(11),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX = BIT(12),
|
||||
RX_PHY_PPDU_END_INFO1_ERR_RX_CBF = BIT(13),
|
||||
};
|
||||
|
||||
struct rx_phy_ppdu_end {
|
||||
__le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
|
||||
__le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
|
||||
} __packed;
|
||||
|
||||
#define RX_PPDU_END_RX_TIMING_OFFSET_MASK 0x00000fff
|
||||
#define RX_PPDU_END_RX_TIMING_OFFSET_LSB 0
|
||||
|
||||
#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK 0x00ffffff
|
||||
#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB 0
|
||||
#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK BIT(24)
|
||||
#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID BIT(25)
|
||||
#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID BIT(26)
|
||||
#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
|
||||
#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL BIT(28)
|
||||
#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC BIT(29)
|
||||
#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE BIT(30)
|
||||
|
||||
struct rx_ppdu_end_qca99x0 {
|
||||
struct rx_pkt_end rx_pkt_end;
|
||||
struct rx_phy_ppdu_end rx_phy_ppdu_end;
|
||||
__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
|
||||
__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
|
||||
__le16 bb_length;
|
||||
__le16 info1; /* %RX_PPDU_END_INFO1_ */
|
||||
} __packed;
|
||||
|
||||
struct rx_ppdu_end {
|
||||
struct rx_ppdu_end_common common;
|
||||
union {
|
||||
struct rx_ppdu_end_qca988x qca988x;
|
||||
struct rx_ppdu_end_qca6174 qca6174;
|
||||
struct rx_ppdu_end_qca99x0 qca99x0;
|
||||
} __packed;
|
||||
} __packed;
|
||||
|
||||
|
@ -53,8 +53,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
|
||||
struct ath10k_skb_cb *skb_cb;
|
||||
struct sk_buff *msdu;
|
||||
|
||||
lockdep_assert_held(&htt->tx_lock);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_HTT,
|
||||
"htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
|
||||
tx_done->msdu_id, !!tx_done->discard,
|
||||
@ -66,12 +64,19 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_bh(&htt->tx_lock);
|
||||
msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
|
||||
if (!msdu) {
|
||||
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
|
||||
tx_done->msdu_id);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
return;
|
||||
}
|
||||
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
|
||||
__ath10k_htt_tx_dec_pending(htt);
|
||||
if (htt->num_pending_tx == 0)
|
||||
wake_up(&htt->empty_tx_wq);
|
||||
spin_unlock_bh(&htt->tx_lock);
|
||||
|
||||
skb_cb = ATH10K_SKB_CB(msdu);
|
||||
|
||||
@ -90,7 +95,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
|
||||
|
||||
if (tx_done->discard) {
|
||||
ieee80211_free_txskb(htt->ar->hw, msdu);
|
||||
goto exit;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
|
||||
@ -104,12 +109,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
|
||||
|
||||
ieee80211_tx_status(htt->ar->hw, msdu);
|
||||
/* we do not own the msdu anymore */
|
||||
|
||||
exit:
|
||||
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
|
||||
__ath10k_htt_tx_dec_pending(htt);
|
||||
if (htt->num_pending_tx == 0)
|
||||
wake_up(&htt->empty_tx_wq);
|
||||
}
|
||||
|
||||
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
|
||||
|
@ -49,6 +49,7 @@ struct wmi_ops {
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@ -319,6 +320,15 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
if (!ar->wmi.ops->get_txbf_conf_scheme)
return WMI_TXBF_CONF_UNSUPPORTED;

return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
@ -519,7 +519,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_TLV_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_TLV_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@ -1279,6 +1279,11 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
return skb;
}

static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
{
return WMI_TXBF_CONF_AFTER_ASSOC;
}

static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
u32 param_value)
@ -1373,7 +1378,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
cfg->rx_decap_mode = __cpu_to_le32(1);
cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
cfg->scan_max_pending_reqs = __cpu_to_le32(4);
cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@ -3408,6 +3413,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,

.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
@ -3122,6 +3122,11 @@ static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
return 0;
}

static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
{
return WMI_TXBF_CONF_BEFORE_ASSOC;
}

void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_swba_ev_arg arg = {};
@ -3498,7 +3503,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
fftr, fftr_len,
tsf);
if (res < 0) {
ath10k_warn(ar, "failed to process fft report: %d\n",
ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
res);
return;
}
@ -3789,7 +3794,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
pool_size,
&paddr,
GFP_ATOMIC);
GFP_KERNEL);
if (!ar->wmi.mem_chunks[idx].vaddr) {
ath10k_warn(ar, "failed to allocate memory chunk\n");
return -ENOMEM;
@ -3878,12 +3883,19 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}

void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
struct sk_buff *skb = ar->svc_rdy_skb;
struct wmi_svc_rdy_ev_arg arg = {};
u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
int ret;

if (!skb) {
ath10k_warn(ar, "invalid service ready event skb\n");
return;
}

ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
@ -4003,9 +4015,17 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
__le32_to_cpu(arg.eeprom_rd),
__le32_to_cpu(arg.num_mem_reqs));

dev_kfree_skb(skb);
ar->svc_rdy_skb = NULL;
complete(&ar->wmi.service_ready);
}

void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
{
ar->svc_rdy_skb = skb;
queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
}

static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_rdy_ev_arg *arg)
{
@ -4177,7 +4197,7 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@ -4298,7 +4318,7 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10X_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_10X_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@ -4409,7 +4429,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_2_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_10_2_READY_EVENTID:
ath10k_wmi_event_ready(ar, skb);
break;
@ -4461,7 +4481,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_4_SERVICE_READY_EVENTID:
ath10k_wmi_event_service_ready(ar, skb);
break;
return;
case WMI_10_4_SCAN_EVENTID:
ath10k_wmi_event_scan(ar, skb);
break;
@ -4688,8 +4708,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);

config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);

@ -4757,8 +4776,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);

config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);

@ -4823,7 +4841,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);

config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
@ -6431,6 +6449,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,

.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@ -6514,6 +6533,8 @@ int ath10k_wmi_attach(struct ath10k *ar)
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);

INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);

return 0;
}

@ -6521,6 +6542,11 @@ void ath10k_wmi_detach(struct ath10k *ar)
{
int i;

cancel_work_sync(&ar->svc_rdy_work);

if (ar->svc_rdy_skb)
dev_kfree_skb(ar->svc_rdy_skb);

/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
dma_free_coherent(ar->dev,
@ -4628,6 +4628,11 @@ enum wmi_10_4_vdev_param {
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)

#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0
#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00

/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG 0x1
/* slot time short */
@ -6008,6 +6013,12 @@ struct wmi_tdls_peer_capab_arg {
u32 pref_offchan_bw;
};

enum wmi_txbf_conf {
WMI_TXBF_CONF_UNSUPPORTED,
WMI_TXBF_CONF_BEFORE_ASSOC,
WMI_TXBF_CONF_AFTER_ASSOC,
};

struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
@ -301,8 +301,26 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);

exit:
if (ret) {
switch (ar->state) {
case ATH10K_STATE_ON:
ar->state = ATH10K_STATE_RESTARTING;
ret = 1;
break;
case ATH10K_STATE_OFF:
case ATH10K_STATE_RESTARTING:
case ATH10K_STATE_RESTARTED:
case ATH10K_STATE_UTF:
case ATH10K_STATE_WEDGED:
ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
ar->state);
ret = -EIO;
break;
}
}

mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
return ret;
}

int ath10k_wow_init(struct ath10k *ar)
@ -427,7 +427,7 @@ struct htc_endpoint_credit_dist {
};

/*
* credit distibution code that is passed into the distrbution function,
* credit distribution code that is passed into the distribution function,
* there are mandatory and optional codes that must be handled
*/
enum htc_credit_dist_reason {
@ -12,6 +12,7 @@ wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += ioctl.o
wil6210-y += fw.o
wil6210-y += pm.o
wil6210-y += pmc.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
61 drivers/net/wireless/ath/wil6210/boot_loader.h Normal file
@ -0,0 +1,61 @@
/* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

/* This file contains the definitions for the boot loader
* for the Qualcomm "Sparrow" 60 Gigabit wireless solution.
*/
#ifndef BOOT_LOADER_EXPORT_H_
#define BOOT_LOADER_EXPORT_H_

struct bl_dedicated_registers_v1 {
__le32 boot_loader_ready; /* 0x880A3C driver will poll
* this Dword until BL will
* set it to 1 (initial value
* should be 0)
*/
__le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
__le16 rf_type; /* 0x880A44 connected RF ID */
__le16 rf_status; /* 0x880A46 RF status,
* 0 is OK else error
*/
__le32 baseband_type; /* 0x880A48 board type ID */
u8 mac_address[6]; /* 0x880A4c BL mac address */
u8 bl_version_major; /* 0x880A52 BL ver. major */
u8 bl_version_minor; /* 0x880A53 BL ver. minor */
__le16 bl_version_subminor; /* 0x880A54 BL ver. subminor */
__le16 bl_version_build; /* 0x880A56 BL ver. build */
/* valid only for version 2 and above */
__le32 bl_assert_code; /* 0x880A58 BL Assert code */
__le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */
__le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */
__le32 bl_magic_number; /* 0x880AB8 BL Magic number */
} __packed;

/* the following struct is the version 0 struct */

struct bl_dedicated_registers_v0 {
__le32 boot_loader_ready; /* 0x880A3C driver will poll
* this Dword until BL will
* set it to 1 (initial value
* should be 0)
*/
#define BL_READY (1) /* ready indication */
__le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */
__le32 rf_type; /* 0x880A44 connected RF ID */
__le32 baseband_type; /* 0x880A48 board type ID */
u8 mac_address[6]; /* 0x880A4c BL mac address */
} __packed;

#endif /* BOOT_LOADER_EXPORT_H_ */
@ -336,12 +336,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
|
||||
else
|
||||
wil_dbg_misc(wil, "Scan has no IE's\n");
|
||||
|
||||
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len,
|
||||
request->ie);
|
||||
if (rc) {
|
||||
wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc);
|
||||
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
|
||||
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
|
||||
@ -462,10 +459,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
|
||||
* ies in FW.
|
||||
*/
|
||||
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
|
||||
if (rc) {
|
||||
wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* WMI_CONNECT_CMD */
|
||||
memset(&conn, 0, sizeof(conn));
|
||||
@ -722,56 +717,52 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
|
||||
{
|
||||
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
|
||||
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
|
||||
int rc = 0;
|
||||
|
||||
if (bcon->probe_resp_len <= hlen)
|
||||
return 0;
|
||||
|
||||
/* always use IE's from full probe frame, they has more info
|
||||
* notable RSN
|
||||
*/
|
||||
bcon->proberesp_ies = f->u.probe_resp.variable;
|
||||
bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
|
||||
if (!bcon->assocresp_ies) {
|
||||
bcon->assocresp_ies = f->u.probe_resp.variable;
|
||||
bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
|
||||
rc = 1;
|
||||
bcon->assocresp_ies = bcon->proberesp_ies;
|
||||
bcon->assocresp_ies_len = bcon->proberesp_ies_len;
|
||||
}
|
||||
|
||||
return rc;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* internal functions for device reset and starting AP */
|
||||
static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
|
||||
size_t probe_ies_len, const u8 *probe_ies,
|
||||
size_t assoc_ies_len, const u8 *assoc_ies)
|
||||
|
||||
struct cfg80211_beacon_data *bcon)
|
||||
{
|
||||
int rc;
|
||||
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
|
||||
|
||||
/* FW do not form regular beacon, so bcon IE's are not set
|
||||
* For the DMG bcon, when it will be supported, bcon IE's will
|
||||
* be reused; add something like:
|
||||
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
|
||||
* bcon->beacon_ies);
|
||||
*/
|
||||
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, probe_ies_len, probe_ies);
|
||||
if (rc) {
|
||||
wil_err(wil, "set_ie(PROBE_RESP) failed\n");
|
||||
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
|
||||
bcon->proberesp_ies);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, assoc_ies_len, assoc_ies);
|
||||
if (rc) {
|
||||
wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
|
||||
rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
|
||||
bcon->assocresp_ies);
|
||||
#if 0 /* to use beacon IE's, remove this #if 0 */
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
|
||||
#endif
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
|
||||
struct net_device *ndev,
|
||||
const u8 *ssid, size_t ssid_len, u32 privacy,
|
||||
int bi, u8 chan,
|
||||
size_t probe_ies_len, const u8 *probe_ies,
|
||||
size_t assoc_ies_len, const u8 *assoc_ies,
|
||||
struct cfg80211_beacon_data *bcon,
|
||||
u8 hidden_ssid)
|
||||
{
|
||||
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
|
||||
@ -792,8 +783,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
rc = _wil_cfg80211_set_ies(wiphy, probe_ies_len, probe_ies,
|
||||
assoc_ies_len, assoc_ies);
|
||||
rc = _wil_cfg80211_set_ies(wiphy, bcon);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
@ -827,27 +817,20 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
|
||||
struct cfg80211_beacon_data *bcon)
|
||||
{
|
||||
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
|
||||
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
|
||||
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
|
||||
const u8 *pr_ies = NULL;
|
||||
size_t pr_ies_len = 0;
|
||||
int rc;
|
||||
u32 privacy = 0;
|
||||
|
||||
wil_dbg_misc(wil, "%s()\n", __func__);
|
||||
wil_print_bcon_data(bcon);
|
||||
|
||||
if (bcon->probe_resp_len > hlen) {
|
||||
pr_ies = f->u.probe_resp.variable;
|
||||
pr_ies_len = bcon->probe_resp_len - hlen;
|
||||
}
|
||||
|
||||
if (wil_fix_bcon(wil, bcon)) {
|
||||
wil_dbg_misc(wil, "Fixed bcon\n");
|
||||
wil_print_bcon_data(bcon);
|
||||
}
|
||||
|
||||
if (pr_ies && cfg80211_find_ie(WLAN_EID_RSN, pr_ies, pr_ies_len))
|
||||
if (bcon->proberesp_ies &&
|
||||
cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies,
|
||||
bcon->proberesp_ies_len))
|
||||
privacy = 1;
|
||||
|
||||
/* in case privacy has changed, need to restart the AP */
|
||||
@ -860,14 +843,10 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
|
||||
rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
|
||||
wdev->ssid_len, privacy,
|
||||
wdev->beacon_interval,
|
||||
wil->channel, pr_ies_len, pr_ies,
|
||||
bcon->assocresp_ies_len,
|
||||
bcon->assocresp_ies,
|
||||
wil->channel, bcon,
|
||||
wil->hidden_ssid);
|
||||
} else {
|
||||
rc = _wil_cfg80211_set_ies(wiphy, pr_ies_len, pr_ies,
|
||||
bcon->assocresp_ies_len,
|
||||
bcon->assocresp_ies);
|
||||
rc = _wil_cfg80211_set_ies(wiphy, bcon);
|
||||
}
|
||||
|
||||
return rc;
|
||||
@ -882,10 +861,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
|
||||
struct ieee80211_channel *channel = info->chandef.chan;
|
||||
struct cfg80211_beacon_data *bcon = &info->beacon;
|
||||
struct cfg80211_crypto_settings *crypto = &info->crypto;
|
||||
struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
|
||||
size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
|
||||
const u8 *pr_ies = NULL;
|
||||
size_t pr_ies_len = 0;
|
||||
u8 hidden_ssid;
|
||||
|
||||
wil_dbg_misc(wil, "%s()\n", __func__);
|
||||
@ -925,11 +900,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
|
||||
wil_print_bcon_data(bcon);
|
||||
wil_print_crypto(wil, crypto);
|
||||
|
||||
if (bcon->probe_resp_len > hlen) {
|
||||
pr_ies = f->u.probe_resp.variable;
|
||||
pr_ies_len = bcon->probe_resp_len - hlen;
|
||||
}
|
||||
|
||||
if (wil_fix_bcon(wil, bcon)) {
|
||||
wil_dbg_misc(wil, "Fixed bcon\n");
|
||||
wil_print_bcon_data(bcon);
|
||||
@ -938,10 +908,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
|
||||
rc = _wil_cfg80211_start_ap(wiphy, ndev,
|
||||
info->ssid, info->ssid_len, info->privacy,
|
||||
info->beacon_interval, channel->hw_value,
|
||||
pr_ies_len, pr_ies,
|
||||
bcon->assocresp_ies_len,
|
||||
bcon->assocresp_ies,
|
||||
hidden_ssid);
|
||||
bcon, hidden_ssid);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -62,7 +62,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
|
||||
seq_printf(s, " swhead = %d\n", vring->swhead);
|
||||
seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
|
||||
if (x) {
|
||||
v = ioread32(x);
|
||||
v = readl(x);
|
||||
seq_printf(s, "0x%08x = %d\n", v, v);
|
||||
} else {
|
||||
seq_puts(s, "???\n");
|
||||
@ -268,7 +268,7 @@ static const struct file_operations fops_mbox = {
|
||||
|
||||
static int wil_debugfs_iomem_x32_set(void *data, u64 val)
|
||||
{
|
||||
iowrite32(val, (void __iomem *)data);
|
||||
writel(val, (void __iomem *)data);
|
||||
wmb(); /* make sure write propagated to HW */
|
||||
|
||||
return 0;
|
||||
@ -276,7 +276,7 @@ static int wil_debugfs_iomem_x32_set(void *data, u64 val)
|
||||
|
||||
static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
|
||||
{
|
||||
*val = ioread32((void __iomem *)data);
|
||||
*val = readl((void __iomem *)data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -306,7 +306,7 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
|
||||
}
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
|
||||
wil_debugfs_ulong_set, "%llu\n");
|
||||
wil_debugfs_ulong_set, "0x%llx\n");
|
||||
|
||||
static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
@ -477,7 +477,7 @@ static int wil_memread_debugfs_show(struct seq_file *s, void *data)
|
||||
void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
|
||||
|
||||
if (a)
|
||||
seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
|
||||
seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
|
||||
else
|
||||
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
|
||||
|
||||
@ -1344,6 +1344,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
|
||||
{
|
||||
int i;
|
||||
u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
|
||||
unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
|
||||
|
||||
seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
|
||||
r->head_seq_num);
|
||||
@ -1353,7 +1354,10 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
|
||||
else
|
||||
seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
|
||||
}
|
||||
seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
|
||||
seq_printf(s,
|
||||
"] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
|
||||
r->total, drop_dup + drop_old, drop_dup, drop_old,
|
||||
r->ssn_last_drop);
|
||||
}
|
||||
|
||||
static int wil_sta_debugfs_show(struct seq_file *s, void *data)
|
||||
|
@ -50,19 +50,13 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,

wil_dbg_misc(wil, "%s()\n", __func__);

tx_itr_en = ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
tx_itr_val =
ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);

rx_itr_en = ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
rx_itr_val =
ioread32(wil->csr +
HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);

cp->tx_coalesce_usecs = tx_itr_val;
cp->rx_coalesce_usecs = rx_itr_val;
@ -22,16 +22,6 @@
MODULE_FIRMWARE(WIL_FW_NAME);
MODULE_FIRMWARE(WIL_FW2_NAME);

/* target operations */
/* register read */
#define R(a) ioread32(wil->csr + HOSTADDR(a))
/* register write. wmb() to make sure it is completed */
#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
/* register set = read, OR, write */
#define S(a, v) W(a, R(a) | v)
/* register clear = read, AND with inverted, write */
#define C(a, v) W(a, R(a) & ~v)

static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
size_t count)
@ -221,12 +221,12 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
|
||||
|
||||
FW_ADDR_CHECK(dst, block[i].addr, "address");
|
||||
|
||||
x = ioread32(dst);
|
||||
x = readl(dst);
|
||||
y = (x & m) | (v & ~m);
|
||||
wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
|
||||
"(old 0x%08x val 0x%08x mask 0x%08x)\n",
|
||||
le32_to_cpu(block[i].addr), y, x, v, m);
|
||||
iowrite32(y, dst);
|
||||
writel(y, dst);
|
||||
wmb(); /* finish before processing next record */
|
||||
}
|
||||
|
||||
@ -239,18 +239,18 @@ static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr,
|
||||
{
|
||||
unsigned delay = 0;
|
||||
|
||||
iowrite32(a, gwa_addr);
|
||||
iowrite32(gw_cmd, gwa_cmd);
|
||||
writel(a, gwa_addr);
|
||||
writel(gw_cmd, gwa_cmd);
|
||||
wmb(); /* finish before activate gw */
|
||||
|
||||
iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
|
||||
writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
|
||||
do {
|
||||
udelay(1); /* typical time is few usec */
|
||||
if (delay++ > 100) {
|
||||
wil_err_fw(wil, "gw timeout\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
} while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
|
||||
} while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -305,7 +305,7 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
|
||||
wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n",
|
||||
i, a, v);
|
||||
|
||||
iowrite32(v, gwa_val);
|
||||
writel(v, gwa_val);
|
||||
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
|
||||
if (rc)
|
||||
return rc;
|
||||
@ -372,7 +372,7 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
|
||||
sizeof(v), false);
|
||||
|
||||
for (k = 0; k < ARRAY_SIZE(block->value); k++)
|
||||
iowrite32(v[k], gwa_val[k]);
|
||||
writel(v[k], gwa_val[k]);
|
||||
rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
@ -61,13 +61,13 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr)
|
||||
|
||||
static inline void wil_icr_clear(u32 x, void __iomem *addr)
|
||||
{
|
||||
iowrite32(x, addr);
|
||||
writel(x, addr);
|
||||
}
|
||||
#endif /* defined(CONFIG_WIL6210_ISR_COR) */
|
||||
|
||||
static inline u32 wil_ioread32_and_clear(void __iomem *addr)
|
||||
{
|
||||
u32 x = ioread32(addr);
|
||||
u32 x = readl(addr);
|
||||
|
||||
wil_icr_clear(x, addr);
|
||||
|
||||
@ -76,54 +76,47 @@ static inline u32 wil_ioread32_and_clear(void __iomem *addr)
|
||||
|
||||
static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
|
||||
{
|
||||
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_TX_ICR) +
|
||||
offsetof(struct RGF_ICR, IMS));
|
||||
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
|
||||
WIL6210_IRQ_DISABLE);
|
||||
}
|
||||
|
||||
static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
|
||||
{
|
||||
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_RX_ICR) +
|
||||
offsetof(struct RGF_ICR, IMS));
|
||||
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
|
||||
WIL6210_IRQ_DISABLE);
|
||||
}
|
||||
|
||||
static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
|
||||
{
|
||||
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
|
||||
offsetof(struct RGF_ICR, IMS));
|
||||
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
|
||||
WIL6210_IRQ_DISABLE);
|
||||
}
|
||||
|
||||
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
|
||||
{
|
||||
wil_dbg_irq(wil, "%s()\n", __func__);
|
||||
|
||||
iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
|
||||
HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
|
||||
wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
|
||||
|
||||
clear_bit(wil_status_irqen, wil->status);
|
||||
}
|
||||
|
||||
void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
|
||||
{
|
||||
iowrite32(WIL6210_IMC_TX, wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_TX_ICR) +
|
||||
offsetof(struct RGF_ICR, IMC));
|
||||
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
|
||||
WIL6210_IMC_TX);
|
||||
}
|
||||
|
||||
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
|
||||
{
|
||||
iowrite32(WIL6210_IMC_RX, wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_RX_ICR) +
|
||||
offsetof(struct RGF_ICR, IMC));
|
||||
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
|
||||
WIL6210_IMC_RX);
|
||||
}
|
||||
|
||||
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
|
||||
{
|
||||
iowrite32(WIL6210_IMC_MISC, wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
|
||||
offsetof(struct RGF_ICR, IMC));
|
||||
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
|
||||
WIL6210_IMC_MISC);
|
||||
}
|
||||
|
||||
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
|
||||
@ -132,8 +125,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
|
||||
|
||||
set_bit(wil_status_irqen, wil->status);
|
||||
|
||||
iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
|
||||
HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
|
||||
wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
|
||||
}
|
||||
|
||||
void wil_mask_irq(struct wil6210_priv *wil)
|
||||
@ -150,12 +142,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
|
||||
{
|
||||
wil_dbg_irq(wil, "%s()\n", __func__);
|
||||
|
||||
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
|
||||
offsetof(struct RGF_ICR, ICC));
|
||||
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
|
||||
offsetof(struct RGF_ICR, ICC));
|
||||
iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
|
||||
offsetof(struct RGF_ICR, ICC));
|
||||
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
|
||||
WIL_ICR_ICC_VALUE);
|
||||
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
|
||||
WIL_ICR_ICC_VALUE);
|
||||
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
|
||||
WIL_ICR_ICC_VALUE);
|
||||
|
||||
wil6210_unmask_irq_pseudo(wil);
|
||||
wil6210_unmask_irq_tx(wil);
|
||||
@ -163,9 +155,6 @@ void wil_unmask_irq(struct wil6210_priv *wil)
|
||||
wil6210_unmask_irq_misc(wil);
|
||||
}
|
||||
|
||||
/* target write operation */
|
||||
#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
|
||||
|
||||
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
|
||||
{
|
||||
wil_dbg_irq(wil, "%s()\n", __func__);
|
||||
@ -177,44 +166,42 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
|
||||
return;
|
||||
|
||||
/* Disable and clear tx counter before (re)configuration */
|
||||
W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
|
||||
W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
|
||||
wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
|
||||
wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
|
||||
wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
|
||||
wil->tx_max_burst_duration);
|
||||
/* Configure TX max burst duration timer to use usec units */
|
||||
W(RGF_DMA_ITR_TX_CNT_CTL,
|
||||
BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
|
||||
wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
|
||||
BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
|
||||
|
||||
/* Disable and clear tx idle counter before (re)configuration */
|
||||
W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
|
||||
W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
|
||||
wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
|
||||
wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
|
||||
wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
|
||||
wil->tx_interframe_timeout);
|
||||
/* Configure TX max burst duration timer to use usec units */
|
||||
W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
|
||||
BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
|
||||
wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
|
||||
BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
|
||||
|
||||
/* Disable and clear rx counter before (re)configuration */
|
||||
W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
|
||||
W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
|
||||
wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
|
||||
wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
|
||||
wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
|
||||
wil->rx_max_burst_duration);
|
||||
/* Configure TX max burst duration timer to use usec units */
|
||||
W(RGF_DMA_ITR_RX_CNT_CTL,
|
||||
BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
|
||||
wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
|
||||
BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
|
||||
|
||||
/* Disable and clear rx idle counter before (re)configuration */
|
||||
W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
|
||||
W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
|
||||
wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
|
||||
wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
|
||||
wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
|
||||
wil->rx_interframe_timeout);
|
||||
/* Configure TX max burst duration timer to use usec units */
|
||||
W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
|
||||
BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
|
||||
wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
|
||||
BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
|
||||
}
|
||||
|
||||
#undef W
|
||||
|
||||
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
|
||||
{
|
||||
struct wil6210_priv *wil = cookie;
|
||||
@ -452,27 +439,24 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
|
||||
u32 icr_rx = wil_ioread32_and_clear(wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_RX_ICR) +
|
||||
offsetof(struct RGF_ICR, ICR));
|
||||
u32 imv_rx = ioread32(wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_RX_ICR) +
|
||||
offsetof(struct RGF_ICR, IMV));
|
||||
u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
|
||||
offsetof(struct RGF_ICR, IMV));
|
||||
u32 icm_tx = wil_ioread32_and_clear(wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_TX_ICR) +
|
||||
offsetof(struct RGF_ICR, ICM));
|
||||
u32 icr_tx = wil_ioread32_and_clear(wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_TX_ICR) +
|
||||
offsetof(struct RGF_ICR, ICR));
|
||||
u32 imv_tx = ioread32(wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_TX_ICR) +
|
||||
offsetof(struct RGF_ICR, IMV));
|
||||
u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
|
||||
offsetof(struct RGF_ICR, IMV));
|
||||
u32 icm_misc = wil_ioread32_and_clear(wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
|
||||
offsetof(struct RGF_ICR, ICM));
|
||||
u32 icr_misc = wil_ioread32_and_clear(wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
|
||||
offsetof(struct RGF_ICR, ICR));
|
||||
u32 imv_misc = ioread32(wil->csr +
|
||||
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
|
||||
offsetof(struct RGF_ICR, IMV));
|
||||
u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
|
||||
offsetof(struct RGF_ICR, IMV));
|
||||
wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
|
||||
"Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
|
||||
"Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
|
||||
@ -492,7 +476,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
|
||||
{
|
||||
irqreturn_t rc = IRQ_HANDLED;
|
||||
struct wil6210_priv *wil = cookie;
|
||||
u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
|
||||
u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
|
||||
|
||||
/**
|
||||
* pseudo_cause is Clear-On-Read, no need to ACK
|
||||
@ -541,48 +525,12 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
|
||||
{
|
||||
int rc;
|
||||
/*
|
||||
* IRQ's are in the following order:
|
||||
* - Tx
|
||||
* - Rx
|
||||
* - Misc
|
||||
*/
|
||||
|
||||
rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
|
||||
WIL_NAME"_tx", wil);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
|
||||
WIL_NAME"_rx", wil);
|
||||
if (rc)
|
||||
goto free0;
|
||||
|
||||
rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
|
||||
wil6210_irq_misc_thread,
|
||||
IRQF_SHARED, WIL_NAME"_misc", wil);
|
||||
if (rc)
|
||||
goto free1;
|
||||
|
||||
return 0;
|
||||
/* error branch */
|
||||
free1:
|
||||
free_irq(irq + 1, wil);
|
||||
free0:
|
||||
free_irq(irq, wil);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* can't use wil_ioread32_and_clear because ICC value is not set yet */
|
||||
static inline void wil_clear32(void __iomem *addr)
|
||||
{
|
||||
u32 x = ioread32(addr);
|
||||
u32 x = readl(addr);
|
||||
|
||||
iowrite32(x, addr);
|
||||
writel(x, addr);
|
||||
}
|
||||
|
||||
void wil6210_clear_irq(struct wil6210_priv *wil)
|
||||
@ -596,19 +544,16 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
|
||||
wmb(); /* make sure write completed */
|
||||
}
|
||||
|
||||
int wil6210_init_irq(struct wil6210_priv *wil, int irq)
|
||||
int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
|
||||
{
|
||||
int rc;
|
||||
|
||||
wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi);
|
||||
wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
|
||||
|
||||
if (wil->n_msi == 3)
|
||||
rc = wil6210_request_3msi(wil, irq);
|
||||
else
|
||||
rc = request_threaded_irq(irq, wil6210_hardirq,
|
||||
wil6210_thread_irq,
|
||||
wil->n_msi ? 0 : IRQF_SHARED,
|
||||
WIL_NAME, wil);
|
||||
rc = request_threaded_irq(irq, wil6210_hardirq,
|
||||
wil6210_thread_irq,
|
||||
use_msi ? 0 : IRQF_SHARED,
|
||||
WIL_NAME, wil);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -618,8 +563,4 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
|
||||
|
||||
wil_mask_irq(wil);
|
||||
free_irq(irq, wil);
|
||||
if (wil->n_msi == 3) {
|
||||
free_irq(irq + 1, wil);
|
||||
free_irq(irq + 2, wil);
|
||||
}
|
||||
}
|
||||
|
@ -76,11 +76,11 @@ static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
/* operation */
switch (io.op & wil_mmio_op_mask) {
case wil_mmio_read:
io.val = ioread32(a);
io.val = readl(a);
need_copy = true;
break;
case wil_mmio_write:
iowrite32(io.val, a);
writel(io.val, a);
wmb(); /* make sure write propagated to HW */
break;
default:
@ -21,6 +21,7 @@
|
||||
#include "wil6210.h"
|
||||
#include "txrx.h"
|
||||
#include "wmi.h"
|
||||
#include "boot_loader.h"
|
||||
|
||||
#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
|
||||
#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
|
||||
@ -270,8 +271,7 @@ static void wil_scan_timer_fn(ulong x)
|
||||
|
||||
clear_bit(wil_status_fwready, wil->status);
|
||||
wil_err(wil, "Scan timeout detected, start fw error recovery\n");
|
||||
wil->recovery_state = fw_recovery_pending;
|
||||
schedule_work(&wil->fw_error_worker);
|
||||
wil_fw_error_recovery(wil);
|
||||
}
|
||||
|
||||
static int wil_wait_for_recovery(struct wil6210_priv *wil)
|
||||
@ -528,26 +528,16 @@ void wil_priv_deinit(struct wil6210_priv *wil)
|
||||
destroy_workqueue(wil->wmi_wq);
|
||||
}
|
||||
|
||||
/* target operations */
|
||||
/* register read */
|
||||
#define R(a) ioread32(wil->csr + HOSTADDR(a))
|
||||
/* register write. wmb() to make sure it is completed */
|
||||
#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
|
||||
/* register set = read, OR, write */
|
||||
#define S(a, v) W(a, R(a) | v)
|
||||
/* register clear = read, AND with inverted, write */
|
||||
#define C(a, v) W(a, R(a) & ~v)
|
||||
|
||||
static inline void wil_halt_cpu(struct wil6210_priv *wil)
|
||||
{
|
||||
W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
|
||||
W(RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
|
||||
wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
|
||||
wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
|
||||
}
|
||||
|
||||
static inline void wil_release_cpu(struct wil6210_priv *wil)
|
||||
{
|
||||
/* Start CPU */
|
||||
W(RGF_USER_USER_CPU_0, 1);
|
||||
wil_w(wil, RGF_USER_USER_CPU_0, 1);
|
||||
}
|
||||
|
||||
static int wil_target_reset(struct wil6210_priv *wil)
|
||||
@ -558,56 +548,60 @@ static int wil_target_reset(struct wil6210_priv *wil)
|
||||
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
|
||||
|
||||
/* Clear MAC link up */
|
||||
S(RGF_HP_CTRL, BIT(15));
|
||||
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
|
||||
S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
|
||||
wil_s(wil, RGF_HP_CTRL, BIT(15));
|
||||
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
|
||||
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
|
||||
|
||||
wil_halt_cpu(wil);
|
||||
|
||||
/* clear all boot loader "ready" bits */
|
||||
W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
|
||||
wil_w(wil, RGF_USER_BL +
|
||||
offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
|
||||
/* Clear Fw Download notification */
|
||||
C(RGF_USER_USAGE_6, BIT(0));
|
||||
wil_c(wil, RGF_USER_USAGE_6, BIT(0));
|
||||
|
||||
S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
|
||||
wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
|
||||
/* XTAL stabilization should take about 3ms */
|
||||
usleep_range(5000, 7000);
|
||||
x = R(RGF_CAF_PLL_LOCK_STATUS);
|
||||
x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
|
||||
if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
|
||||
wil_err(wil, "Xtal stabilization timeout\n"
|
||||
"RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
|
||||
return -ETIME;
|
||||
}
|
||||
/* switch 10k to XTAL*/
|
||||
C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
|
||||
wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
|
||||
/* 40 MHz */
|
||||
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
|
||||
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
|
||||
|
||||
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
|
||||
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
|
||||
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
|
||||
|
||||
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
|
||||
W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
|
||||
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
|
||||
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
|
||||
/* reset A2 PCIE AHB */
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
|
||||
|
||||
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
|
||||
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
|
||||
|
||||
/* wait until device ready. typical time is 20..80 msec */
|
||||
do {
|
||||
msleep(RST_DELAY);
|
||||
x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
|
||||
x = wil_r(wil, RGF_USER_BL +
|
||||
offsetof(struct bl_dedicated_registers_v0,
|
||||
boot_loader_ready));
|
||||
if (x1 != x) {
|
||||
wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
|
||||
x1 = x;
|
||||
@ -617,13 +611,13 @@ static int wil_target_reset(struct wil6210_priv *wil)
|
||||
x);
|
||||
return -ETIME;
|
||||
}
|
||||
} while (x != BIT_BL_READY);
|
||||
} while (x != BL_READY);
|
||||
|
||||
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
|
||||
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
|
||||
|
||||
/* enable fix for HW bug related to the SA/DA swap in AP Rx */
|
||||
S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
|
||||
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
|
||||
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
|
||||
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
|
||||
|
||||
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
|
||||
return 0;
|
||||
@ -641,29 +635,93 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
|
||||
static int wil_get_bl_info(struct wil6210_priv *wil)
|
||||
{
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
struct RGF_BL bl;
|
||||
union {
|
||||
struct bl_dedicated_registers_v0 bl0;
|
||||
struct bl_dedicated_registers_v1 bl1;
|
||||
} bl;
|
||||
u32 bl_ver;
|
||||
u8 *mac;
|
||||
u16 rf_status;
|
||||
|
||||
wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
|
||||
le32_to_cpus(&bl.ready);
|
||||
le32_to_cpus(&bl.version);
|
||||
le32_to_cpus(&bl.rf_type);
|
||||
le32_to_cpus(&bl.baseband_type);
|
||||
wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
|
||||
sizeof(bl));
|
||||
bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
|
||||
mac = bl.bl0.mac_address;
|
||||
|
||||
if (!is_valid_ether_addr(bl.mac_address)) {
|
||||
wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
|
||||
if (bl_ver == 0) {
|
||||
le32_to_cpus(&bl.bl0.rf_type);
|
||||
le32_to_cpus(&bl.bl0.baseband_type);
|
||||
rf_status = 0; /* actually, unknown */
|
||||
wil_info(wil,
|
||||
"Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
|
||||
bl_ver, mac,
|
||||
bl.bl0.rf_type, bl.bl0.baseband_type);
|
||||
wil_info(wil, "Boot Loader build unknown for struct v0\n");
|
||||
} else {
|
||||
le16_to_cpus(&bl.bl1.rf_type);
|
||||
rf_status = le16_to_cpu(bl.bl1.rf_status);
|
||||
le32_to_cpus(&bl.bl1.baseband_type);
|
||||
le16_to_cpus(&bl.bl1.bl_version_subminor);
|
||||
le16_to_cpus(&bl.bl1.bl_version_build);
|
||||
wil_info(wil,
|
||||
"Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
|
||||
bl_ver, mac,
|
||||
bl.bl1.rf_type, rf_status,
|
||||
bl.bl1.baseband_type);
|
||||
wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
|
||||
bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
|
||||
bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
|
||||
}
|
||||
|
||||
if (!is_valid_ether_addr(mac)) {
|
||||
wil_err(wil, "BL: Invalid MAC %pM\n", mac);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ether_addr_copy(ndev->perm_addr, bl.mac_address);
|
||||
ether_addr_copy(ndev->perm_addr, mac);
|
||||
if (!is_valid_ether_addr(ndev->dev_addr))
|
||||
ether_addr_copy(ndev->dev_addr, bl.mac_address);
|
||||
wil_info(wil,
|
||||
"Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
|
||||
bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
|
||||
ether_addr_copy(ndev->dev_addr, mac);
|
||||
|
||||
if (rf_status) {/* bad RF cable? */
|
||||
wil_err(wil, "RF communication error 0x%04x",
|
||||
rf_status);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
|
||||
{
|
||||
u32 bl_assert_code, bl_assert_blink, bl_magic_number;
|
||||
u32 bl_ver = wil_r(wil, RGF_USER_BL +
|
||||
offsetof(struct bl_dedicated_registers_v0,
|
||||
boot_loader_struct_version));
|
||||
|
||||
if (bl_ver < 2)
|
||||
return;
|
||||
|
||||
bl_assert_code = wil_r(wil, RGF_USER_BL +
|
||||
offsetof(struct bl_dedicated_registers_v1,
|
||||
bl_assert_code));
|
||||
bl_assert_blink = wil_r(wil, RGF_USER_BL +
|
||||
offsetof(struct bl_dedicated_registers_v1,
|
||||
bl_assert_blink));
|
||||
bl_magic_number = wil_r(wil, RGF_USER_BL +
|
||||
offsetof(struct bl_dedicated_registers_v1,
|
||||
bl_magic_number));
|
||||
|
||||
if (is_err) {
|
||||
wil_err(wil,
|
||||
"BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
|
||||
bl_assert_code, bl_assert_blink, bl_magic_number);
|
||||
} else {
|
||||
wil_dbg_misc(wil,
|
||||
"BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
|
||||
bl_assert_code, bl_assert_blink, bl_magic_number);
|
||||
}
|
||||
}
|
||||
|
||||
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
|
||||
{
|
||||
ulong to = msecs_to_jiffies(1000);
|
||||
@ -690,9 +748,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
|
||||
wil_dbg_misc(wil, "%s()\n", __func__);
|
||||
|
||||
if (wil->hw_version == HW_VER_UNKNOWN)
|
||||
return -ENODEV;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&wil->mutex));
|
||||
WARN_ON(test_bit(wil_status_napi_en, wil->status));
|
||||
|
||||
@ -707,6 +762,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (wil->hw_version == HW_VER_UNKNOWN)
|
||||
return -ENODEV;
|
||||
|
||||
cancel_work_sync(&wil->disconnect_worker);
|
||||
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
|
||||
wil_bcast_fini(wil);
|
||||
@ -729,12 +787,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
flush_workqueue(wil->wq_service);
|
||||
flush_workqueue(wil->wmi_wq);
|
||||
|
||||
wil_bl_crash_info(wil, false);
|
||||
rc = wil_target_reset(wil);
|
||||
wil_rx_fini(wil);
|
||||
if (rc)
|
||||
if (rc) {
|
||||
wil_bl_crash_info(wil, true);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = wil_get_bl_info(wil);
|
||||
if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
|
||||
rc = 0;
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -752,7 +815,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
return rc;
|
||||
|
||||
/* Mark FW as loaded from host */
|
||||
S(RGF_USER_USAGE_6, 1);
|
||||
wil_s(wil, RGF_USER_USAGE_6, 1);
|
||||
|
||||
/* clear any interrupts which on-card-firmware
|
||||
* may have set
|
||||
@ -760,8 +823,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
wil6210_clear_irq(wil);
|
||||
/* CAF_ICR - clear and mask */
|
||||
/* it is W1C, clear by writing back same value */
|
||||
S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
|
||||
W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
|
||||
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
|
||||
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
|
||||
|
||||
wil_release_cpu(wil);
|
||||
}
|
||||
@ -785,11 +848,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
return rc;
|
||||
}
|
||||
|
||||
#undef R
|
||||
#undef W
|
||||
#undef S
|
||||
#undef C
|
||||
|
||||
void wil_fw_error_recovery(struct wil6210_priv *wil)
|
||||
{
|
||||
wil_dbg_misc(wil, "starting fw error recovery\n");
|
||||
|
@ -173,7 +173,10 @@ void *wil_if_alloc(struct device *dev)
wil_set_ethtoolops(ndev);
ndev->ieee80211_ptr = wdev;
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GRO;
NETIF_F_SG | NETIF_F_GRO |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXHASH;

ndev->features |= ndev->hw_features;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
@ -21,16 +21,14 @@
|
||||
|
||||
#include "wil6210.h"
|
||||
|
||||
static int use_msi = 1;
|
||||
module_param(use_msi, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(use_msi,
|
||||
" Use MSI interrupt: "
|
||||
"0 - don't, 1 - (default) - single, or 3");
|
||||
static bool use_msi = true;
|
||||
module_param(use_msi, bool, S_IRUGO);
|
||||
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
|
||||
|
||||
static
|
||||
void wil_set_capabilities(struct wil6210_priv *wil)
|
||||
{
|
||||
u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
|
||||
u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
|
||||
|
||||
bitmap_zero(wil->hw_capabilities, hw_capability_last);
|
||||
|
||||
@ -50,24 +48,12 @@ void wil_set_capabilities(struct wil6210_priv *wil)
|
||||
|
||||
void wil_disable_irq(struct wil6210_priv *wil)
|
||||
{
|
||||
int irq = wil->pdev->irq;
|
||||
|
||||
disable_irq(irq);
|
||||
if (wil->n_msi == 3) {
|
||||
disable_irq(irq + 1);
|
||||
disable_irq(irq + 2);
|
||||
}
|
||||
disable_irq(wil->pdev->irq);
|
||||
}
|
||||
|
||||
void wil_enable_irq(struct wil6210_priv *wil)
|
||||
{
|
||||
int irq = wil->pdev->irq;
|
||||
|
||||
enable_irq(irq);
|
||||
if (wil->n_msi == 3) {
|
||||
enable_irq(irq + 1);
|
||||
enable_irq(irq + 2);
|
||||
}
|
||||
enable_irq(wil->pdev->irq);
|
||||
}
|
||||
|
||||
/* Bus ops */
|
||||
@ -80,6 +66,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
|
||||
* and only MSI should be used
|
||||
*/
|
||||
int msi_only = pdev->msi_enabled;
|
||||
bool _use_msi = use_msi;
|
||||
|
||||
wil_dbg_misc(wil, "%s()\n", __func__);
|
||||
|
||||
@ -87,41 +74,20 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
/*
|
||||
* how many MSI interrupts to request?
|
||||
*/
|
||||
switch (use_msi) {
|
||||
case 3:
|
||||
case 1:
|
||||
wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
|
||||
break;
|
||||
case 0:
|
||||
wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
|
||||
break;
|
||||
default:
|
||||
wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
|
||||
use_msi = 1;
|
||||
}
|
||||
wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
|
||||
|
||||
if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
|
||||
wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
|
||||
use_msi = 1;
|
||||
}
|
||||
|
||||
if (use_msi == 1 && pci_enable_msi(pdev)) {
|
||||
if (use_msi && pci_enable_msi(pdev)) {
|
||||
wil_err(wil, "pci_enable_msi failed, use INTx\n");
|
||||
use_msi = 0;
|
||||
_use_msi = false;
|
||||
}
|
||||
|
||||
wil->n_msi = use_msi;
|
||||
|
||||
if ((wil->n_msi == 0) && msi_only) {
|
||||
if (!_use_msi && msi_only) {
|
||||
wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
|
||||
rc = -ENODEV;
|
||||
goto stop_master;
|
||||
}
|
||||
|
||||
rc = wil6210_init_irq(wil, pdev->irq);
|
||||
rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
|
||||
if (rc)
|
||||
goto stop_master;
|
||||
|
||||
@ -293,11 +259,80 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
static int wil6210_suspend(struct device *dev, bool is_runtime)
|
||||
{
|
||||
int rc = 0;
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct wil6210_priv *wil = pci_get_drvdata(pdev);
|
||||
|
||||
wil_dbg_pm(wil, "%s(%s)\n", __func__,
|
||||
is_runtime ? "runtime" : "system");
|
||||
|
||||
rc = wil_can_suspend(wil, is_runtime);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
rc = wil_suspend(wil, is_runtime);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
/* TODO: how do I bring card in low power state? */
|
||||
|
||||
/* disable bus mastering */
|
||||
pci_clear_master(pdev);
|
||||
/* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
|
||||
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int wil6210_resume(struct device *dev, bool is_runtime)
|
||||
{
|
||||
int rc = 0;
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct wil6210_priv *wil = pci_get_drvdata(pdev);
|
||||
|
||||
wil_dbg_pm(wil, "%s(%s)\n", __func__,
|
||||
is_runtime ? "runtime" : "system");
|
||||
|
||||
/* allow master */
|
||||
pci_set_master(pdev);
|
||||
|
||||
rc = wil_resume(wil, is_runtime);
|
||||
if (rc)
|
||||
pci_clear_master(pdev);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int wil6210_pm_suspend(struct device *dev)
|
||||
{
|
||||
return wil6210_suspend(dev, false);
|
||||
}
|
||||
|
||||
static int wil6210_pm_resume(struct device *dev)
|
||||
{
|
||||
return wil6210_resume(dev, false);
|
||||
}
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
static const struct dev_pm_ops wil6210_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
|
||||
};
|
||||
|
||||
static struct pci_driver wil6210_driver = {
|
||||
.probe = wil_pcie_probe,
|
||||
.remove = wil_pcie_remove,
|
||||
.id_table = wil6210_pcie_ids,
|
||||
.name = WIL_NAME,
|
||||
.driver = {
|
||||
.pm = &wil6210_pm_ops,
|
||||
},
|
||||
};
|
||||
|
||||
static int __init wil6210_driver_init(void)
|
||||
|
drivers/net/wireless/ath/wil6210/pm.c (new file, 98 lines)
@ -0,0 +1,98 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "wil6210.h"
|
||||
|
||||
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
|
||||
{
|
||||
int rc = 0;
|
||||
struct wireless_dev *wdev = wil->wdev;
|
||||
|
||||
wil_dbg_pm(wil, "%s(%s)\n", __func__,
|
||||
is_runtime ? "runtime" : "system");
|
||||
|
||||
switch (wdev->iftype) {
|
||||
case NL80211_IFTYPE_MONITOR:
|
||||
case NL80211_IFTYPE_STATION:
|
||||
case NL80211_IFTYPE_P2P_CLIENT:
|
||||
break;
|
||||
/* AP-like interface - can't suspend */
|
||||
default:
|
||||
wil_dbg_pm(wil, "AP-like interface\n");
|
||||
rc = -EBUSY;
|
||||
break;
|
||||
}
|
||||
|
||||
wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
|
||||
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
|
||||
|
||||
return rc;
|
||||
}
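
Editor's note: wil_can_suspend() gates suspend purely on interface type - client-side interfaces may suspend, AP-like ones return -EBUSY. A minimal sketch of that gating; the enum values here are stand-ins for the nl80211 iftype constants, not the real ones.

#include <stdio.h>

enum iftype { IF_STATION, IF_MONITOR, IF_P2P_CLIENT, IF_AP, IF_P2P_GO };

static int can_suspend(enum iftype type)
{
	switch (type) {
	case IF_MONITOR:
	case IF_STATION:
	case IF_P2P_CLIENT:
		return 0;	/* suspend allowed */
	default:
		return -16;	/* -EBUSY: AP-like interface keeps radio up */
	}
}

int main(void)
{
	printf("station: %d, AP: %d\n",
	       can_suspend(IF_STATION), can_suspend(IF_AP));
	return 0;
}
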
|
||||
|
||||
int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
|
||||
{
|
||||
int rc = 0;
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
|
||||
wil_dbg_pm(wil, "%s(%s)\n", __func__,
|
||||
is_runtime ? "runtime" : "system");
|
||||
|
||||
/* if netif up, hardware is alive, shut it down */
|
||||
if (ndev->flags & IFF_UP) {
|
||||
rc = wil_down(wil);
|
||||
if (rc) {
|
||||
wil_err(wil, "wil_down : %d\n", rc);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (wil->platform_ops.suspend)
|
||||
rc = wil->platform_ops.suspend(wil->platform_handle);
|
||||
|
||||
out:
|
||||
wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
|
||||
is_runtime ? "runtime" : "system", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int wil_resume(struct wil6210_priv *wil, bool is_runtime)
|
||||
{
|
||||
int rc = 0;
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
|
||||
wil_dbg_pm(wil, "%s(%s)\n", __func__,
|
||||
is_runtime ? "runtime" : "system");
|
||||
|
||||
if (wil->platform_ops.resume) {
|
||||
rc = wil->platform_ops.resume(wil->platform_handle);
|
||||
if (rc) {
|
||||
wil_err(wil, "platform_ops.resume : %d\n", rc);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* if netif up, bring hardware up.
* During open(), IFF_UP is set after the actual device method
* invocation. This prevents a recursive call to wil_up()
*/
|
||||
if (ndev->flags & IFF_UP)
|
||||
rc = wil_up(wil);
|
||||
|
||||
out:
|
||||
wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
|
||||
is_runtime ? "runtime" : "system", rc);
|
||||
return rc;
|
||||
}
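
Editor's note: suspend and resume above are deliberately symmetric - suspend shuts the interface down before the platform hook, resume runs the platform hook first and restores the interface only if it was up. A minimal sketch of that ordering, assuming a single "netif was up" flag:

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool netif_up;
};

static int do_suspend(struct dev_state *s)
{
	if (s->netif_up)
		printf("wil_down()\n");		/* quiesce HW while netif is up */
	printf("platform suspend hook\n");
	return 0;
}

static int do_resume(struct dev_state *s)
{
	printf("platform resume hook\n");
	if (s->netif_up)
		printf("wil_up()\n");		/* restore only what was running */
	return 0;
}

int main(void)
{
	struct dev_state s = { .netif_up = true };

	do_suspend(&s);
	do_resume(&s);
	return 0;
}
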
|
@ -121,6 +121,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
|
||||
goto out;
|
||||
}
|
||||
|
||||
r->total++;
|
||||
hseq = r->head_seq_num;
|
||||
|
||||
/** Due to the race between WMI events, where BACK establishment
|
||||
@ -153,6 +154,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
|
||||
/* frame with out of date sequence number */
|
||||
if (seq_less(seq, r->head_seq_num)) {
|
||||
r->ssn_last_drop = seq;
|
||||
r->drop_old++;
|
||||
wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
|
||||
seq, r->head_seq_num);
|
||||
dev_kfree_skb(skb);
|
||||
goto out;
|
||||
}
|
||||
@ -173,6 +177,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
|
||||
|
||||
/* check if we already stored this frame */
|
||||
if (r->reorder_buf[index]) {
|
||||
r->drop_dup++;
|
||||
wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
|
||||
dev_kfree_skb(skb);
|
||||
goto out;
|
||||
}
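
Editor's note: the two drop counters added above cover frames behind the reorder window (drop_old) and frames already sitting in the reorder buffer (drop_dup). A standalone model of both checks; the seq_less() here uses the common modulo-4096 comparison for 12-bit 802.11 sequence numbers, and the buffer indexing is simplified rather than the driver's exact code.

#include <stdbool.h>
#include <stdio.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0x0fff

static bool seq_less(unsigned short sq1, unsigned short sq2)
{
	/* true if sq1 is "before" sq2 in the 12-bit circular space */
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

int main(void)
{
	unsigned short head_seq_num = 0x010;
	bool stored[64] = { false };	/* stands in for reorder_buf[] */
	unsigned short frames[] = { 0x00f, 0x012, 0x012 };

	for (unsigned i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
		unsigned short seq = frames[i];
		unsigned index = (seq - head_seq_num) & SEQ_MASK;

		if (seq_less(seq, head_seq_num)) {
			printf("drop old seq 0x%03x\n", seq);	/* drop_old++ */
			continue;
		}
		if (stored[index % 64]) {
			printf("drop dup seq 0x%03x\n", seq);	/* drop_dup++ */
			continue;
		}
		stored[index % 64] = true;
		printf("store seq 0x%03x\n", seq);
	}
	return 0;
}
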
|
||||
|
@ -509,7 +509,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
|
||||
break;
|
||||
}
|
||||
}
|
||||
iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
|
||||
wil_w(wil, v->hwtail, v->swtail);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -541,6 +541,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
|
||||
[GRO_DROP] = "GRO_DROP",
|
||||
};
|
||||
|
||||
if (ndev->features & NETIF_F_RXHASH)
|
||||
/* fake L4 hash to ensure it won't be re-calculated later;
* set the hash to any non-zero value to activate the rps
* mechanism, the core will be chosen according
* to the user-level rps configuration.
*/
|
||||
skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
|
||||
|
||||
skb_orphan(skb);
|
||||
|
||||
if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
|
||||
@ -1058,14 +1066,52 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
|
||||
static inline
|
||||
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
|
||||
{
|
||||
d->mac.d[2] |= ((nr_frags + 1) <<
|
||||
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
|
||||
d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
|
||||
}
|
||||
|
||||
static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
|
||||
struct vring_tx_desc *d,
|
||||
struct sk_buff *skb)
|
||||
/**
|
||||
* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
|
||||
* @skb is used to obtain the protocol and headers length.
|
||||
* @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
|
||||
* 2 - middle, 3 - last descriptor.
|
||||
*/
|
||||
|
||||
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
|
||||
struct sk_buff *skb,
|
||||
int tso_desc_type, bool is_ipv4,
|
||||
int tcp_hdr_len, int skb_net_hdr_len)
|
||||
{
|
||||
d->dma.b11 = ETH_HLEN; /* MAC header length */
|
||||
d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
|
||||
|
||||
d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
|
||||
/* L4 header len: TCP header length */
|
||||
d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
|
||||
|
||||
/* Setup TSO: bit and desc type */
|
||||
d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
|
||||
(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
|
||||
d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
|
||||
|
||||
d->dma.ip_length = skb_net_hdr_len;
|
||||
/* Enable TCP/UDP checksum */
|
||||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
|
||||
/* Calculate pseudo-header */
|
||||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the descriptor @d up for csum. The corresponding
|
||||
* @skb is used to obtain the protocol and headers length.
|
||||
* Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
|
||||
* Note, if d==NULL, the function only returns the protocol result.
|
||||
*
|
||||
* It is very similar to previous wil_tx_desc_offload_setup_tso. This
|
||||
* is "if unrolling" to optimize the critical path.
|
||||
*/
|
||||
|
||||
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
|
||||
struct sk_buff *skb)
{
|
||||
int protocol;
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
||||
@ -1110,6 +1156,305 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void wil_tx_last_desc(struct vring_tx_desc *d)
|
||||
{
|
||||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
|
||||
BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
|
||||
BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
|
||||
}
|
||||
|
||||
static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
|
||||
{
|
||||
d->dma.d0 |= wil_tso_type_lst <<
|
||||
DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
|
||||
}
|
||||
|
||||
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct device *dev = wil_to_dev(wil);
|
||||
|
||||
/* point to descriptors in shared memory */
|
||||
volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
|
||||
*_first_desc = NULL;
|
||||
|
||||
/* pointers to shadow descriptors */
|
||||
struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
|
||||
*d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
|
||||
*first_desc = &first_desc_mem;
|
||||
|
||||
/* pointer to shadow descriptors' context */
|
||||
struct wil_ctx *hdr_ctx, *first_ctx = NULL;
|
||||
|
||||
int descs_used = 0; /* total number of used descriptors */
|
||||
int sg_desc_cnt = 0; /* number of descriptors for current mss*/
|
||||
|
||||
u32 swhead = vring->swhead;
|
||||
int used, avail = wil_vring_avail_tx(vring);
|
||||
int nr_frags = skb_shinfo(skb)->nr_frags;
|
||||
int min_desc_required = nr_frags + 1;
|
||||
int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
|
||||
int f, len, hdrlen, headlen;
|
||||
int vring_index = vring - wil->vring_tx;
|
||||
struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
|
||||
uint i = swhead;
|
||||
dma_addr_t pa;
|
||||
const skb_frag_t *frag = NULL;
|
||||
int rem_data = mss;
|
||||
int lenmss;
|
||||
int hdr_compensation_need = true;
|
||||
int desc_tso_type = wil_tso_type_first;
|
||||
bool is_ipv4;
|
||||
int tcp_hdr_len;
|
||||
int skb_net_hdr_len;
|
||||
int gso_type;
|
||||
|
||||
wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
|
||||
__func__, skb->len, vring_index);
|
||||
|
||||
if (unlikely(!txdata->enabled))
|
||||
return -EINVAL;
|
||||
|
||||
/* A typical 4K page is 3-4 payloads; we assume each fragment
* is a full payload, that's how min_desc_required has been
* calculated. In reality we might need more or fewer descriptors;
* this is the initial check only.
*/
|
||||
if (unlikely(avail < min_desc_required)) {
|
||||
wil_err_ratelimited(wil,
|
||||
"TSO: Tx ring[%2d] full. No space for %d fragments\n",
|
||||
vring_index, min_desc_required);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Header Length = MAC header len + IP header len + TCP header len*/
|
||||
hdrlen = ETH_HLEN +
|
||||
(int)skb_network_header_len(skb) +
|
||||
tcp_hdrlen(skb);
|
||||
|
||||
gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
|
||||
switch (gso_type) {
|
||||
case SKB_GSO_TCPV4:
|
||||
/* TCP v4, zero out the IP length and IPv4 checksum fields
|
||||
* as required by the offloading doc
|
||||
*/
|
||||
ip_hdr(skb)->tot_len = 0;
|
||||
ip_hdr(skb)->check = 0;
|
||||
is_ipv4 = true;
|
||||
break;
|
||||
case SKB_GSO_TCPV6:
|
||||
/* TCP v6, zero out the payload length */
|
||||
ipv6_hdr(skb)->payload_len = 0;
|
||||
is_ipv4 = false;
|
||||
break;
|
||||
default:
|
||||
/* other than TCPv4 or TCPv6 types are not supported for TSO.
|
||||
* It is also illegal for both to be set simultaneously
|
||||
*/
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
||||
return -EINVAL;
|
||||
|
||||
/* tcp header length and skb network header length are fixed for all
* of the packet's descriptors - read them once here
*/
|
||||
tcp_hdr_len = tcp_hdrlen(skb);
|
||||
skb_net_hdr_len = skb_network_header_len(skb);
|
||||
|
||||
_hdr_desc = &vring->va[i].tx;
|
||||
|
||||
pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(dev, pa))) {
|
||||
wil_err(wil, "TSO: Skb head DMA map error\n");
|
||||
goto err_exit;
|
||||
}
|
||||
|
||||
wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
|
||||
wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
|
||||
tcp_hdr_len, skb_net_hdr_len);
|
||||
wil_tx_last_desc(hdr_desc);
|
||||
|
||||
vring->ctx[i].mapped_as = wil_mapped_as_single;
|
||||
hdr_ctx = &vring->ctx[i];
|
||||
|
||||
descs_used++;
|
||||
headlen = skb_headlen(skb) - hdrlen;
|
||||
|
||||
for (f = headlen ? -1 : 0; f < nr_frags; f++) {
|
||||
if (headlen) {
|
||||
len = headlen;
|
||||
wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
|
||||
len);
|
||||
} else {
|
||||
frag = &skb_shinfo(skb)->frags[f];
|
||||
len = frag->size;
|
||||
wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
|
||||
}
|
||||
|
||||
while (len) {
|
||||
wil_dbg_txrx(wil,
|
||||
"TSO: len %d, rem_data %d, descs_used %d\n",
|
||||
len, rem_data, descs_used);
|
||||
|
||||
if (descs_used == avail) {
|
||||
wil_err(wil, "TSO: ring overflow\n");
|
||||
goto dma_error;
|
||||
}
|
||||
|
||||
lenmss = min_t(int, rem_data, len);
|
||||
i = (swhead + descs_used) % vring->size;
|
||||
wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
|
||||
|
||||
if (!headlen) {
|
||||
pa = skb_frag_dma_map(dev, frag,
|
||||
frag->size - len, lenmss,
|
||||
DMA_TO_DEVICE);
|
||||
vring->ctx[i].mapped_as = wil_mapped_as_page;
|
||||
} else {
|
||||
pa = dma_map_single(dev,
|
||||
skb->data +
|
||||
skb_headlen(skb) - headlen,
|
||||
lenmss,
|
||||
DMA_TO_DEVICE);
|
||||
vring->ctx[i].mapped_as = wil_mapped_as_single;
|
||||
headlen -= lenmss;
|
||||
}
|
||||
|
||||
if (unlikely(dma_mapping_error(dev, pa)))
|
||||
goto dma_error;
|
||||
|
||||
_desc = &vring->va[i].tx;
|
||||
|
||||
if (!_first_desc) {
|
||||
_first_desc = _desc;
|
||||
first_ctx = &vring->ctx[i];
|
||||
d = first_desc;
|
||||
} else {
|
||||
d = &desc_mem;
|
||||
}
|
||||
|
||||
wil_tx_desc_map(d, pa, lenmss, vring_index);
|
||||
wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
|
||||
is_ipv4, tcp_hdr_len,
|
||||
skb_net_hdr_len);
|
||||
|
||||
/* use tso_type_first only once */
|
||||
desc_tso_type = wil_tso_type_mid;
|
||||
|
||||
descs_used++; /* desc used so far */
|
||||
sg_desc_cnt++; /* desc used for this segment */
|
||||
len -= lenmss;
|
||||
rem_data -= lenmss;
|
||||
|
||||
wil_dbg_txrx(wil,
|
||||
"TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
|
||||
len, rem_data, descs_used, sg_desc_cnt);
|
||||
|
||||
/* Close the segment if we reached mss size or the last frag */
|
||||
if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
|
||||
if (hdr_compensation_need) {
|
||||
/* first segment includes the hdr desc for
* release
*/
|
||||
hdr_ctx->nr_frags = sg_desc_cnt;
|
||||
wil_tx_desc_set_nr_frags(first_desc,
|
||||
sg_desc_cnt +
|
||||
1);
|
||||
hdr_compensation_need = false;
|
||||
} else {
|
||||
wil_tx_desc_set_nr_frags(first_desc,
|
||||
sg_desc_cnt);
|
||||
}
|
||||
first_ctx->nr_frags = sg_desc_cnt - 1;
|
||||
|
||||
wil_tx_last_desc(d);
|
||||
|
||||
/* first descriptor may also be the last
|
||||
* for this mss - make sure not to copy
|
||||
* it twice
|
||||
*/
|
||||
if (first_desc != d)
|
||||
*_first_desc = *first_desc;
|
||||
|
||||
/* last descriptor will be copied at the end
* of this TSO processing
*/
|
||||
if (f < nr_frags - 1 || len > 0)
|
||||
*_desc = *d;
|
||||
|
||||
rem_data = mss;
|
||||
_first_desc = NULL;
|
||||
sg_desc_cnt = 0;
|
||||
} else if (first_desc != d) /* update mid descriptor */
|
||||
*_desc = *d;
|
||||
}
|
||||
}
|
||||
|
||||
/* first descriptor may also be the last.
* In this case the d pointer is invalid
*/
|
||||
if (_first_desc == _desc)
|
||||
d = first_desc;
|
||||
|
||||
/* Last data descriptor */
|
||||
wil_set_tx_desc_last_tso(d);
|
||||
*_desc = *d;
|
||||
|
||||
/* Fill the total number of descriptors in first desc (hdr)*/
|
||||
wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
|
||||
*_hdr_desc = *hdr_desc;
|
||||
|
||||
/* hold reference to skb
|
||||
* to prevent skb release before accounting
|
||||
* in case of immediate "tx done"
|
||||
*/
|
||||
vring->ctx[i].skb = skb_get(skb);
|
||||
|
||||
/* performance monitoring */
|
||||
used = wil_vring_used_tx(vring);
|
||||
if (wil_val_in_range(vring_idle_trsh,
|
||||
used, used + descs_used)) {
|
||||
txdata->idle += get_cycles() - txdata->last_idle;
|
||||
wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
|
||||
vring_index, used, used + descs_used);
|
||||
}
|
||||
|
||||
/* advance swhead */
|
||||
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
|
||||
wil_vring_advance_head(vring, descs_used);
|
||||
|
||||
/* make sure all writes to descriptors (shared memory) are done before
|
||||
* committing them to HW
|
||||
*/
|
||||
wmb();
|
||||
|
||||
wil_w(wil, vring->hwtail, vring->swhead);
|
||||
return 0;
|
||||
|
||||
dma_error:
|
||||
wil_err(wil, "TSO: DMA map page error\n");
|
||||
while (descs_used > 0) {
|
||||
struct wil_ctx *ctx;
|
||||
|
||||
i = (swhead + descs_used) % vring->size;
|
||||
d = (struct vring_tx_desc *)&vring->va[i].tx;
|
||||
_desc = &vring->va[i].tx;
|
||||
*d = *_desc;
|
||||
_desc->dma.status = TX_DMA_STATUS_DU;
|
||||
ctx = &vring->ctx[i];
|
||||
wil_txdesc_unmap(dev, d, ctx);
|
||||
if (ctx->skb)
|
||||
dev_kfree_skb_any(ctx->skb);
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
descs_used--;
|
||||
}
|
||||
|
||||
err_exit:
|
||||
return -EINVAL;
|
||||
}
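
Editor's note: the heart of __wil_tx_vring_tso() is the descriptor accounting - one header descriptor, then data descriptors cut at mss boundaries across the linear head and the page fragments, closing a segment whenever rem_data reaches zero or the last byte is consumed. The standalone sketch below mirrors only that slicing logic; the sizes and helper names are made up for the example.

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int mss = 1460;
	int headlen = 900;		/* linear data after the headers */
	int frags[] = { 4096, 4096, 700 };
	int nr_frags = sizeof(frags) / sizeof(frags[0]);

	int descs_used = 1;		/* header descriptor */
	int sg_desc_cnt = 0;		/* descriptors in the current segment */
	int rem_data = mss;

	for (int f = headlen ? -1 : 0; f < nr_frags; f++) {
		int len = (f < 0) ? headlen : frags[f];

		while (len) {
			int lenmss = min_int(rem_data, len);

			descs_used++;
			sg_desc_cnt++;
			len -= lenmss;
			rem_data -= lenmss;

			/* close the segment at an mss boundary or at the end */
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				printf("segment closed with %d descriptors\n",
				       sg_desc_cnt);
				rem_data = mss;
				sg_desc_cnt = 0;
			}
		}
	}
	printf("total descriptors: %d\n", descs_used);
	return 0;
}
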
|
||||
|
||||
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
@ -1128,7 +1473,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
||||
bool mcast = (vring_index == wil->bcast_vring);
|
||||
uint len = skb_headlen(skb);
|
||||
|
||||
wil_dbg_txrx(wil, "%s()\n", __func__);
|
||||
wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
|
||||
__func__, skb->len, vring_index);
|
||||
|
||||
if (unlikely(!txdata->enabled))
|
||||
return -EINVAL;
|
||||
@ -1159,14 +1505,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
||||
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
|
||||
}
|
||||
/* Process TCP/UDP checksum offloading */
|
||||
if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
|
||||
if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
|
||||
wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
|
||||
vring_index);
|
||||
goto dma_error;
|
||||
}
|
||||
|
||||
vring->ctx[i].nr_frags = nr_frags;
|
||||
wil_tx_desc_set_nr_frags(d, nr_frags);
|
||||
wil_tx_desc_set_nr_frags(d, nr_frags + 1);
|
||||
|
||||
/* middle segments */
|
||||
for (; f < nr_frags; f++) {
|
||||
@ -1190,7 +1536,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
||||
* if it succeeded for 1-st descriptor,
|
||||
* it will succeed here too
|
||||
*/
|
||||
wil_tx_desc_offload_cksum_set(wil, d, skb);
|
||||
wil_tx_desc_offload_setup(d, skb);
|
||||
}
|
||||
/* for the last seg only */
|
||||
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
|
||||
@ -1221,7 +1567,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
||||
wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
|
||||
vring->swhead);
|
||||
trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
|
||||
iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
|
||||
|
||||
/* make sure all writes to descriptors (shared memory) are done before
|
||||
* committing them to HW
|
||||
*/
|
||||
wmb();
|
||||
|
||||
wil_w(wil, vring->hwtail, vring->swhead);
|
||||
|
||||
return 0;
|
||||
dma_error:
|
||||
@ -1254,8 +1606,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
||||
int rc;
|
||||
|
||||
spin_lock(&txdata->lock);
|
||||
rc = __wil_tx_vring(wil, vring, skb);
|
||||
|
||||
rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
|
||||
(wil, vring, skb);
|
||||
|
||||
spin_unlock(&txdata->lock);
|
||||
|
||||
return rc;
|
||||
}
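
Editor's note: the dispatch above uses a ternary to pick a function and call it immediately while the per-ring lock is held. A tiny sketch of that call shape only, with made-up function names:

#include <stdbool.h>
#include <stdio.h>

static int tx_plain(int bytes) { printf("plain tx %d bytes\n", bytes); return 0; }
static int tx_tso(int bytes)   { printf("tso tx %d bytes\n", bytes);   return 0; }

int main(void)
{
	bool is_gso = true;

	/* same shape as the dispatch above: choose, then call */
	return (is_gso ? tx_tso : tx_plain)(64000);
}
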
|
||||
|
||||
@ -1382,7 +1738,8 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
|
||||
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
|
||||
/**
|
||||
* For the fragmented skb, HW will set DU bit only for the
|
||||
* last fragment. look for it
|
||||
* last fragment. look for it.
|
||||
* In TSO the first DU will include hdr desc
|
||||
*/
|
||||
int lf = (vring->swtail + ctx->nr_frags) % vring->size;
|
||||
/* TODO: check we are not past head */
|
||||
|
@ -291,6 +291,14 @@ struct vring_tx_dma {
|
||||
__le16 length;
|
||||
} __packed;
|
||||
|
||||
/* TSO type used in dma descriptor d0 bits 11-12 */
|
||||
enum {
|
||||
wil_tso_type_hdr = 0,
|
||||
wil_tso_type_first = 1,
|
||||
wil_tso_type_mid = 2,
|
||||
wil_tso_type_lst = 3,
|
||||
};
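
Editor's note: per the comment above, the TSO type occupies bits 11-12 of the descriptor's d0 word. A sketch of packing and reading a 2-bit field at that position; the shift/mask constants are written out here for the example, whereas the driver uses its DMA_CFG_DESC_TX_0_* macros.

#include <stdint.h>
#include <stdio.h>

#define TSO_TYPE_POS 11u
#define TSO_TYPE_MSK (0x3u << TSO_TYPE_POS)

static uint32_t set_tso_type(uint32_t d0, uint32_t type)
{
	return (d0 & ~TSO_TYPE_MSK) | ((type & 0x3u) << TSO_TYPE_POS);
}

int main(void)
{
	uint32_t d0 = 0;

	d0 = set_tso_type(d0, 3);	/* wil_tso_type_lst */
	printf("d0 = 0x%08x, type = %u\n",
	       d0, (d0 & TSO_TYPE_MSK) >> TSO_TYPE_POS);
	return 0;
}
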
|
||||
|
||||
/* Rx descriptor - MAC part
|
||||
* [dword 0]
|
||||
* bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
|
||||
|
@ -127,16 +127,6 @@ struct RGF_ICR {
|
||||
u32 IMC; /* Mask Clear, write 1 to clear */
|
||||
} __packed;
|
||||
|
||||
struct RGF_BL {
|
||||
u32 ready; /* 0x880A3C bit [0] */
|
||||
#define BIT_BL_READY BIT(0)
|
||||
u32 version; /* 0x880A40 version of the BL struct */
|
||||
u32 rf_type; /* 0x880A44 ID of the connected RF */
|
||||
u32 baseband_type; /* 0x880A48 ID of the baseband */
|
||||
u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
|
||||
u8 pad[2];
|
||||
} __packed;
|
||||
|
||||
/* registers - FW addresses */
|
||||
#define RGF_USER_USAGE_1 (0x880004)
|
||||
#define RGF_USER_USAGE_6 (0x880018)
|
||||
@ -262,9 +252,8 @@ enum {
|
||||
};
|
||||
|
||||
/* popular locations */
|
||||
#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
|
||||
#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
|
||||
offsetof(struct RGF_ICR, ICS))
|
||||
#define RGF_MBOX RGF_USER_USER_SCRATCH_PAD
|
||||
#define HOST_MBOX HOSTADDR(RGF_MBOX)
|
||||
#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
|
||||
|
||||
/* ISR register bits */
|
||||
@ -434,12 +423,12 @@ struct pci_dev;
|
||||
* @ssn: Starting Sequence Number expected to be aggregated.
|
||||
* @buf_size: buffer size for incoming A-MPDUs
|
||||
* @timeout: reset timer value (in TUs).
|
||||
* @ssn_last_drop: SSN of the last dropped frame
|
||||
* @total: total number of processed incoming frames
|
||||
* @drop_dup: duplicate frames dropped for this reorder buffer
|
||||
* @drop_old: old frames dropped for this reorder buffer
|
||||
* @dialog_token: dialog token for aggregation session
|
||||
* @rcu_head: RCU head used for freeing this struct
|
||||
*
|
||||
* This structure's lifetime is managed by RCU, assignments to
|
||||
* the array holding it must hold the aggregation mutex.
|
||||
*
|
||||
* @first_time: true when this buffer used 1-st time
|
||||
*/
|
||||
struct wil_tid_ampdu_rx {
|
||||
struct sk_buff **reorder_buf;
|
||||
@ -453,6 +442,9 @@ struct wil_tid_ampdu_rx {
|
||||
u16 buf_size;
|
||||
u16 timeout;
|
||||
u16 ssn_last_drop;
|
||||
unsigned long long total; /* frames processed */
|
||||
unsigned long long drop_dup;
|
||||
unsigned long long drop_old;
|
||||
u8 dialog_token;
|
||||
bool first_time; /* is it 1-st time this buffer used? */
|
||||
};
|
||||
@ -543,7 +535,6 @@ struct pmc_ctx {
|
||||
|
||||
struct wil6210_priv {
|
||||
struct pci_dev *pdev;
|
||||
int n_msi;
|
||||
struct wireless_dev *wdev;
|
||||
void __iomem *csr;
|
||||
DECLARE_BITMAP(status, wil_status_last);
|
||||
@ -656,6 +647,33 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
|
||||
#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
|
||||
#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
|
||||
#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
|
||||
#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
|
||||
|
||||
/* target operations */
|
||||
/* register read */
|
||||
static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
|
||||
{
|
||||
return readl(wil->csr + HOSTADDR(reg));
|
||||
}
|
||||
|
||||
/* register write. wmb() to make sure it is completed */
|
||||
static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
|
||||
{
|
||||
writel(val, wil->csr + HOSTADDR(reg));
|
||||
wmb(); /* wait for write to propagate to the HW */
|
||||
}
|
||||
|
||||
/* register set = read, OR, write */
|
||||
static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
|
||||
{
|
||||
wil_w(wil, reg, wil_r(wil, reg) | val);
|
||||
}
|
||||
|
||||
/* register clear = read, AND with inverted, write */
|
||||
static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
|
||||
{
|
||||
wil_w(wil, reg, wil_r(wil, reg) & ~val);
|
||||
}
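
Editor's note: the four accessors above replace open-coded ioread32/iowrite32 sequences throughout the driver. A userspace model of their semantics, with a plain array standing in for the mapped CSR space (no readl/writel or write barriers here, of course):

#include <stdint.h>
#include <stdio.h>

static uint32_t csr[16];	/* fake register file, index == "reg" */

static uint32_t reg_r(uint32_t reg)               { return csr[reg]; }
static void     reg_w(uint32_t reg, uint32_t val) { csr[reg] = val; }
static void     reg_s(uint32_t reg, uint32_t val) { reg_w(reg, reg_r(reg) | val);  }
static void     reg_c(uint32_t reg, uint32_t val) { reg_w(reg, reg_r(reg) & ~val); }

int main(void)
{
	reg_w(3, 0x00f0);
	reg_s(3, 0x0001);	/* set a bit:   read, OR, write */
	reg_c(3, 0x0010);	/* clear a bit: read, AND ~, write */
	printf("reg 3 = 0x%04x\n", reg_r(3));	/* prints 0x00e1 */
	return 0;
}
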
|
||||
|
||||
#if defined(CONFIG_DYNAMIC_DEBUG)
|
||||
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
|
||||
@ -746,7 +764,7 @@ void wil_back_tx_worker(struct work_struct *work);
|
||||
void wil_back_tx_flush(struct wil6210_priv *wil);
|
||||
|
||||
void wil6210_clear_irq(struct wil6210_priv *wil);
|
||||
int wil6210_init_irq(struct wil6210_priv *wil, int irq);
|
||||
int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
|
||||
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
|
||||
void wil_mask_irq(struct wil6210_priv *wil);
|
||||
void wil_unmask_irq(struct wil6210_priv *wil);
|
||||
@ -798,4 +816,8 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
|
||||
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
|
||||
int wil_request_firmware(struct wil6210_priv *wil, const char *name);
|
||||
|
||||
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
|
||||
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
|
||||
int wil_resume(struct wil6210_priv *wil, bool is_runtime);
|
||||
|
||||
#endif /* __WIL6210_H__ */
|
||||
|
@ -14,7 +14,7 @@
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "linux/device.h"
|
||||
#include <linux/device.h>
|
||||
#include "wil_platform.h"
|
||||
|
||||
int __init wil_platform_modinit(void)
|
||||
|
@ -228,8 +228,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
|
||||
wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
|
||||
/* wait till FW finish with previous command */
|
||||
for (retry = 5; retry > 0; retry--) {
|
||||
r->tail = ioread32(wil->csr + HOST_MBOX +
|
||||
offsetof(struct wil6210_mbox_ctl, tx.tail));
|
||||
r->tail = wil_r(wil, RGF_MBOX +
|
||||
offsetof(struct wil6210_mbox_ctl, tx.tail));
|
||||
if (next_head != r->tail)
|
||||
break;
|
||||
msleep(20);
|
||||
@ -254,16 +254,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
|
||||
wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
|
||||
wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
|
||||
/* mark entry as full */
|
||||
iowrite32(1, wil->csr + HOSTADDR(r->head) +
|
||||
offsetof(struct wil6210_mbox_ring_desc, sync));
|
||||
wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
|
||||
/* advance next ptr */
|
||||
iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
|
||||
offsetof(struct wil6210_mbox_ctl, tx.head));
|
||||
wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
|
||||
r->head = next_head);
|
||||
|
||||
trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
|
||||
|
||||
/* interrupt to FW */
|
||||
iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
|
||||
wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
|
||||
SW_INT_MBOX);
|
||||
|
||||
return 0;
|
||||
}
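
Editor's note: __wmi_send() refuses to advance while next_head would collide with the firmware's tail pointer, and ring pointers move with the same base-relative modulo arithmetic that the rx.tail update below uses. A toy model of that bookkeeping; the entry and ring sizes here are arbitrary example values.

#include <stdio.h>

#define ENTRY_SIZE 16u
#define RING_SIZE  (4u * ENTRY_SIZE)

static unsigned advance(unsigned base, unsigned ptr)
{
	return base + ((ptr - base + ENTRY_SIZE) % RING_SIZE);
}

int main(void)
{
	unsigned base = 0x100, head = 0x100, tail = 0x100;
	unsigned sent = 0;

	for (int i = 0; i < 6; i++) {
		unsigned next_head = advance(base, head);

		if (next_head == tail) {	/* FW has not consumed yet */
			printf("ring full after %u commands\n", sent);
			break;
		}
		head = next_head;		/* commit the entry */
		sent++;
	}
	printf("head 0x%x tail 0x%x\n", head, tail);
	return 0;
}
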
|
||||
@ -312,22 +312,44 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
|
||||
struct wiphy *wiphy = wil_to_wiphy(wil);
|
||||
struct ieee80211_mgmt *rx_mgmt_frame =
|
||||
(struct ieee80211_mgmt *)data->payload;
|
||||
int ch_no = data->info.channel+1;
|
||||
u32 freq = ieee80211_channel_to_frequency(ch_no,
|
||||
IEEE80211_BAND_60GHZ);
|
||||
struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
|
||||
s32 signal = data->info.sqi;
|
||||
__le16 fc = rx_mgmt_frame->frame_control;
|
||||
u32 d_len = le32_to_cpu(data->info.len);
|
||||
u16 d_status = le16_to_cpu(data->info.status);
|
||||
int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
|
||||
int ch_no;
|
||||
u32 freq;
|
||||
struct ieee80211_channel *channel;
|
||||
s32 signal;
|
||||
__le16 fc;
|
||||
u32 d_len;
|
||||
u16 d_status;
|
||||
|
||||
wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
|
||||
if (flen < 0) {
|
||||
wil_err(wil, "MGMT Rx: short event, len %d\n", len);
|
||||
return;
|
||||
}
|
||||
|
||||
d_len = le32_to_cpu(data->info.len);
|
||||
if (d_len != flen) {
|
||||
wil_err(wil,
|
||||
"MGMT Rx: length mismatch, d_len %d should be %d\n",
|
||||
d_len, flen);
|
||||
return;
|
||||
}
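
Editor's note: the validation added above rejects events that are too short to carry a payload at all, and events whose internal length field disagrees with the actual event size. A standalone illustration of the same check; the struct layout is a toy, not the real WMI event.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_rx_mgmt_event {
	uint32_t declared_len;	/* stands in for data->info.len */
	uint8_t  payload[];	/* the management frame */
};

static int validate(int event_len, uint32_t declared_len)
{
	int flen = event_len - (int)offsetof(struct toy_rx_mgmt_event, payload);

	if (flen < 0)
		return -1;	/* short event, no room for any payload */
	if ((uint32_t)flen != declared_len)
		return -1;	/* header disagrees with actual size */
	return flen;
}

int main(void)
{
	printf("%d\n", validate(4 + 100, 100));	/* ok: 100 */
	printf("%d\n", validate(2, 100));	/* short event: -1 */
	printf("%d\n", validate(4 + 80, 100));	/* mismatch: -1 */
	return 0;
}
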
|
||||
|
||||
ch_no = data->info.channel + 1;
|
||||
freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
|
||||
channel = ieee80211_get_channel(wiphy, freq);
|
||||
signal = data->info.sqi;
|
||||
d_status = le16_to_cpu(data->info.status);
|
||||
fc = rx_mgmt_frame->frame_control;
|
||||
|
||||
wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n",
|
||||
data->info.channel, data->info.mcs, data->info.snr,
|
||||
data->info.sqi);
|
||||
wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
|
||||
le16_to_cpu(fc));
|
||||
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
|
||||
data->info.qid, data->info.mid, data->info.cid);
|
||||
wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
|
||||
d_len, true);
|
||||
|
||||
if (!channel) {
|
||||
wil_err(wil, "Frame on unsupported channel\n");
|
||||
@ -363,6 +385,17 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
|
||||
}
|
||||
}
|
||||
|
||||
static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
|
||||
{
|
||||
struct wmi_tx_mgmt_packet_event *data = d;
|
||||
struct ieee80211_mgmt *mgmt_frame =
|
||||
(struct ieee80211_mgmt *)data->payload;
|
||||
int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
|
||||
|
||||
wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
|
||||
flen, true);
|
||||
}
|
||||
|
||||
static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
|
||||
void *d, int len)
|
||||
{
|
||||
@ -659,6 +692,7 @@ static const struct {
|
||||
{WMI_READY_EVENTID, wmi_evt_ready},
|
||||
{WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
|
||||
{WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
|
||||
{WMI_TX_MGMT_PACKET_EVENTID, wmi_evt_tx_mgmt},
|
||||
{WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
|
||||
{WMI_CONNECT_EVENTID, wmi_evt_connect},
|
||||
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
|
||||
@ -695,8 +729,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
|
||||
u16 len;
|
||||
bool q;
|
||||
|
||||
r->head = ioread32(wil->csr + HOST_MBOX +
|
||||
offsetof(struct wil6210_mbox_ctl, rx.head));
|
||||
r->head = wil_r(wil, RGF_MBOX +
|
||||
offsetof(struct wil6210_mbox_ctl, rx.head));
|
||||
if (r->tail == r->head)
|
||||
break;
|
||||
|
||||
@ -734,8 +768,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
|
||||
cmd = (void *)&evt->event.wmi;
|
||||
wil_memcpy_fromio_32(cmd, src, len);
|
||||
/* mark entry as empty */
|
||||
iowrite32(0, wil->csr + HOSTADDR(r->tail) +
|
||||
offsetof(struct wil6210_mbox_ring_desc, sync));
|
||||
wil_w(wil, r->tail +
|
||||
offsetof(struct wil6210_mbox_ring_desc, sync), 0);
|
||||
/* indicate */
|
||||
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
|
||||
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
|
||||
@ -754,8 +788,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
|
||||
/* advance tail */
|
||||
r->tail = r->base + ((r->tail - r->base +
|
||||
sizeof(struct wil6210_mbox_ring_desc)) % r->size);
|
||||
iowrite32(r->tail, wil->csr + HOST_MBOX +
|
||||
offsetof(struct wil6210_mbox_ctl, rx.tail));
|
||||
wil_w(wil, RGF_MBOX +
|
||||
offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
|
||||
|
||||
/* add to the pending list */
|
||||
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
|
||||
@ -988,12 +1022,21 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
|
||||
|
||||
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
|
||||
{
|
||||
static const char *const names[] = {
|
||||
[WMI_FRAME_BEACON] = "BEACON",
|
||||
[WMI_FRAME_PROBE_REQ] = "PROBE_REQ",
|
||||
[WMI_FRAME_PROBE_RESP] = "WMI_FRAME_PROBE_RESP",
|
||||
[WMI_FRAME_ASSOC_REQ] = "WMI_FRAME_ASSOC_REQ",
|
||||
[WMI_FRAME_ASSOC_RESP] = "WMI_FRAME_ASSOC_RESP",
|
||||
};
|
||||
int rc;
|
||||
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
|
||||
struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
|
||||
|
||||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
if (!cmd) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
if (!ie)
|
||||
ie_len = 0;
|
||||
|
||||
@ -1003,6 +1046,12 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
|
||||
memcpy(cmd->ie_info, ie, ie_len);
|
||||
rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
|
||||
kfree(cmd);
|
||||
out:
|
||||
if (rc) {
|
||||
const char *name = type < ARRAY_SIZE(names) ?
|
||||
names[type] : "??";
|
||||
wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -1129,15 +1178,42 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
|
||||
|
||||
int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
|
||||
{
|
||||
int rc;
|
||||
u16 reason_code;
|
||||
struct wmi_disconnect_sta_cmd cmd = {
|
||||
.disconnect_reason = cpu_to_le16(reason),
|
||||
};
|
||||
struct {
|
||||
struct wil6210_mbox_hdr_wmi wmi;
|
||||
struct wmi_disconnect_event evt;
|
||||
} __packed reply;
|
||||
|
||||
ether_addr_copy(cmd.dst_mac, mac);
|
||||
|
||||
wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
|
||||
|
||||
return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
|
||||
rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
|
||||
WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
|
||||
/* failure to disconnect within a reasonable time is treated as an FW error */
|
||||
if (rc) {
|
||||
wil_fw_error_recovery(wil);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* call event handler manually after processing wmi_call,
|
||||
* to avoid deadlock - disconnect event handler acquires wil->mutex
|
||||
* while it is already held here
|
||||
*/
|
||||
reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
|
||||
|
||||
wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
|
||||
reply.evt.bssid, reason_code,
|
||||
reply.evt.disconnect_reason);
|
||||
|
||||
wil->sinfo_gen++;
|
||||
wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
|
||||
|
||||
return 0;
|
||||
}
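
Editor's note: the change above makes the disconnect synchronous and then runs the event handler manually, because the generic event path would try to take wil->mutex while wmi_disconnect_sta()'s caller already holds it. A rough sketch of that reasoning; the "lock" here is just a flag for illustration, not a real mutex.

#include <stdbool.h>
#include <stdio.h>

static bool wil_mutex_held;

static void disconnect_event_handler(void)
{
	if (wil_mutex_held)
		printf("handler run inline; lock already held by the caller\n");
	else
		printf("handler run from event path; would take wil->mutex\n");
}

static int disconnect_sta(void)
{
	wil_mutex_held = true;	/* caller already holds wil->mutex */

	printf("wmi_call(WMI_DISCONNECT_STA) -> wait for WMI_DISCONNECT reply\n");
	/* if the generic dispatcher ran the handler now it would block on
	 * wil->mutex forever; instead the reply is consumed here and the
	 * handler is invoked manually
	 */
	disconnect_event_handler();

	wil_mutex_held = false;
	return 0;
}

int main(void)
{
	return disconnect_sta();
}
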
|
||||
|
||||
int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
|
||||
@ -1279,7 +1355,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
|
||||
/* search for handler */
|
||||
if (!wmi_evt_call_handler(wil, id, evt_data,
|
||||
len - sizeof(*wmi))) {
|
||||
wil_err(wil, "Unhandled event 0x%04x\n", id);
|
||||
wil_info(wil, "Unhandled event 0x%04x\n", id);
|
||||
}
|
||||
} else {
|
||||
wil_err(wil, "Unknown event type\n");
|