Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for 4.19. Major changes:

wcn36xx

* fix WEP in client mode

wil6210

* add support for Talyn-MB (Talyn ver 2.0) device

* add support for enhanced DMA firmware feature

Kalle Valo 2018-07-25 10:50:54 +03:00
commit bf9b608e63
38 changed files with 4614 additions and 770 deletions


@ -274,7 +274,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
@ -655,7 +655,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
htc->max_msgs_per_htc_bundle =
min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
HTC_HOST_MAX_MSG_PER_BUNDLE);
HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Extended ready message. RX bundle size: %d\n",
htc->max_msgs_per_htc_bundle);


@ -50,7 +50,8 @@ struct ath10k;
* 4-byte aligned.
*/
#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@ -58,6 +59,7 @@ enum ath10k_htc_tx_flags {
};
enum ath10k_htc_rx_flags {
ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};


@ -268,11 +268,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
spin_unlock_bh(&htt->rx_ring.lock);
if (ret)
ath10k_htt_rx_ring_free(htt);
spin_unlock_bh(&htt->rx_ring.lock);
return ret;
}
@ -284,7 +285,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
ath10k_htt_get_rx_ring_size(htt),
@ -1089,7 +1092,7 @@ static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
@ -2810,7 +2813,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@ -2874,7 +2877,7 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
if (skb_queue_empty(&ar->htt.rx_msdus_q))
break;
skb = __skb_dequeue(&ar->htt.rx_msdus_q);
skb = skb_dequeue(&ar->htt.rx_msdus_q);
if (!skb)
break;
ath10k_process_rx(ar, skb);
@ -2905,7 +2908,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
goto exit;
}
while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);


@ -1056,7 +1056,7 @@ static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
return HTT_DATA_TX_EXT_TID_MGMT;
else if (cb->flags & ATH10K_SKB_F_QOS)
return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
else
return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
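The hunk above replaces a modulo with a bitwise AND when deriving the TID from skb->priority. A throwaway userspace sketch (illustration only, not part of the patch) shows why that matters: with IEEE80211_QOS_CTL_TID_MASK defined as 0x000f, the modulo folds priority 15 back to 0, while the AND keeps all sixteen TID values distinct.

#include <stdio.h>

#define IEEE80211_QOS_CTL_TID_MASK 0x000f	/* value as defined in include/linux/ieee80211.h */

int main(void)
{
	unsigned int priority;

	/* Compare the old (%) and new (&) mappings for every QoS TID. */
	for (priority = 0; priority < 16; priority++)
		printf("priority %2u -> %% gives %2u, & gives %2u\n",
		       priority,
		       priority % IEEE80211_QOS_CTL_TID_MASK,
		       priority & IEEE80211_QOS_CTL_TID_MASK);
	return 0;
}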


@ -4026,7 +4026,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
drv_priv);
/* Prevent aggressive sta/tid taking over tx queue */
max = 16;
max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
ret = 0;
while (ath10k_mac_tx_can_push(hw, txq) && max--) {
ret = ath10k_mac_tx_push_txq(hw, txq);
@ -4047,6 +4047,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
rcu_read_unlock();
spin_unlock_bh(&ar->txqs_lock);
}
EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
/************/
/* Scanning */
@ -4287,7 +4288,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
struct ieee80211_txq *f_txq;
struct ath10k_txq *f_artxq;
int ret = 0;
int max = 16;
int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))


@ -30,6 +30,7 @@
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
@ -396,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
int ret;
payload_len = le16_to_cpu(htc_hdr->len);
skb->len = payload_len + sizeof(struct ath10k_htc_hdr);
if (trailer_present) {
trailer = skb->data + sizeof(*htc_hdr) +
@ -434,12 +436,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
int lookahead_idx = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
lookaheads_local = lookaheads;
n_lookahead_local = n_lookahead;
id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
id = ((struct ath10k_htc_hdr *)
&lookaheads[lookahead_idx++])->eid;
if (id >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
@ -462,6 +466,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
/* Only read lookahead's from RX trailers
* for the last packet in a bundle.
*/
lookahead_idx--;
lookaheads_local = NULL;
n_lookahead_local = NULL;
}
@ -505,11 +510,11 @@ static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
*bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) {
if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
ath10k_warn(ar,
"HTC bundle length %u exceeds maximum %u\n",
le16_to_cpu(htc_hdr->len),
HTC_HOST_MAX_MSG_PER_BUNDLE);
HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
return -ENOMEM;
}
@ -600,6 +605,9 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
* ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
* packet skb's have been allocated in the previous step.
*/
if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
act_len,
full_len,
@ -1342,6 +1350,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
break;
} while (time_before(jiffies, timeout) && !done);
ath10k_mac_tx_push_pending(ar);
sdio_claim_host(ar_sdio->func);
if (ret && ret != -ECANCELED)


@ -96,14 +96,14 @@
* way:
*
* Let's assume that each packet in a bundle of the maximum bundle size
* (HTC_HOST_MAX_MSG_PER_BUNDLE) has the HTC header bundle count set
* to the maximum value (HTC_HOST_MAX_MSG_PER_BUNDLE).
* (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
* to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
*
* in this case the driver must allocate
* (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) skb's.
* (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
*/
#define ATH10K_SDIO_MAX_RX_MSGS \
(HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE)
(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
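The comment earlier in this hunk describes the worst case behind ATH10K_SDIO_MAX_RX_MSGS: a full bundle whose every packet advertises yet another full bundle. With the RX limit set to 8 that works out to 64 pre-allocated receive messages; a standalone check of the arithmetic (illustration only):

#include <stdio.h>

#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8

#define ATH10K_SDIO_MAX_RX_MSGS \
	(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)

int main(void)
{
	/* Worst case: 8 packets per bundle, each pointing at another 8. */
	printf("ATH10K_SDIO_MAX_RX_MSGS = %d\n", ATH10K_SDIO_MAX_RX_MSGS);	/* prints 64 */
	return 0;
}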


@ -1076,6 +1076,8 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = reg->eeprom_rd;
arg->low_5ghz_chan = reg->low_5ghz_chan;
arg->high_5ghz_chan = reg->high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = svc_bmap;
arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
@ -1614,10 +1616,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
ie_len = roundup(arg->ie_len, 4);
len = (sizeof(*tlv) + sizeof(*cmd)) +
(arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
(arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
(arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
(arg->ie_len ? sizeof(*tlv) + ie_len : 0);
sizeof(*tlv) + chan_len +
sizeof(*tlv) + ssid_len +
sizeof(*tlv) + bssid_len +
sizeof(*tlv) + ie_len;
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)


@ -342,7 +342,7 @@ struct ath_chanctx {
struct ath_beacon_config beacon;
struct ath9k_hw_cal_data caldata;
struct timespec tsf_ts;
struct timespec64 tsf_ts;
u64 tsf_val;
u32 last_beacon;
@ -1021,7 +1021,7 @@ struct ath_softc {
struct ath_offchannel offchannel;
struct ath_chanctx *next_chan;
struct completion go_beacon;
struct timespec last_event_time;
struct timespec64 last_event_time;
#endif
unsigned long driver_data;
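The ath9k hunks here and in the following files swap struct timespec for struct timespec64 (and getrawmonotonic() for ktime_get_raw_ts64()), in line with the kernel's year-2038 cleanup: a signed 32-bit seconds counter runs out in January 2038. A small userspace sketch (illustration only) of that limit:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
	/* Largest second count a signed 32-bit tv_sec can represent. */
	time_t last = (time_t)INT32_MAX;
	char buf[64];

	strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S UTC", gmtime(&last));
	printf("a 32-bit signed time_t overflows after %s\n", buf);
	return 0;
}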


@ -233,9 +233,9 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
static u32 chanctx_event_delta(struct ath_softc *sc)
{
u64 ms;
struct timespec ts, *old;
struct timespec64 ts, *old;
getrawmonotonic(&ts);
ktime_get_raw_ts64(&ts);
old = &sc->last_event_time;
ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
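chanctx_event_delta() above turns two raw-monotonic timestamps into a millisecond delta by hand (tv_sec * 1000 + tv_nsec / 1000000), and that arithmetic is unchanged by the timespec64 conversion. A standalone sketch of the same conversion (illustration only):

#include <stdio.h>
#include <time.h>

static long long to_ms(const struct timespec *ts)
{
	return (long long)ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
}

int main(void)
{
	struct timespec old = { .tv_sec = 100, .tv_nsec = 900000000 };
	struct timespec now = { .tv_sec = 102, .tv_nsec = 250000000 };

	printf("delta = %lld ms\n", to_ms(&now) - to_ms(&old));	/* 1350 ms */
	return 0;
}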
@ -334,7 +334,7 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
{
struct ath_chanctx *prev, *cur;
struct timespec ts;
struct timespec64 ts;
u32 cur_tsf, prev_tsf, beacon_int;
s32 offset;
@ -346,7 +346,7 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
if (!prev->switch_after_beacon)
return;
getrawmonotonic(&ts);
ktime_get_raw_ts64(&ts);
cur_tsf = (u32) cur->tsf_val +
ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
@ -1230,7 +1230,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_chanctx *old_ctx;
struct timespec ts;
struct timespec64 ts;
bool measure_time = false;
bool send_ps = false;
bool queues_stopped = false;
@ -1260,7 +1260,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_unlock_bh(&sc->chan_lock);
if (sc->next_chan == &sc->offchannel.chan) {
getrawmonotonic(&ts);
ktime_get_raw_ts64(&ts);
measure_time = true;
}
@ -1277,7 +1277,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_lock_bh(&sc->chan_lock);
if (sc->cur_chan != &sc->offchannel.chan) {
getrawmonotonic(&sc->cur_chan->tsf_ts);
ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
}
}


@ -138,6 +138,7 @@ static void hif_usb_mgmt_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
struct hif_device_usb *hif_dev;
unsigned long flags;
bool txok = true;
if (!cmd || !cmd->skb || !cmd->hif_dev)
@ -158,14 +159,14 @@ static void hif_usb_mgmt_cb(struct urb *urb)
* If the URBs are being flushed, no need to complete
* this packet.
*/
spin_lock(&hif_dev->tx.tx_lock);
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
spin_unlock(&hif_dev->tx.tx_lock);
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
dev_kfree_skb_any(cmd->skb);
kfree(cmd);
return;
}
spin_unlock(&hif_dev->tx.tx_lock);
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
break;
default:


@ -1107,25 +1107,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
unsigned long flags;
spin_lock(&priv->rx.rxbuflock);
spin_lock_irqsave(&priv->rx.rxbuflock, flags);
list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
if (!tmp_buf->in_process) {
rxbuf = tmp_buf;
break;
}
}
spin_unlock(&priv->rx.rxbuflock);
spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
if (rxbuf == NULL) {
ath_dbg(common, ANY, "No free RX buffer\n");
goto err;
}
spin_lock(&priv->rx.rxbuflock);
spin_lock_irqsave(&priv->rx.rxbuflock, flags);
rxbuf->skb = skb;
rxbuf->in_process = true;
spin_unlock(&priv->rx.rxbuflock);
spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
tasklet_schedule(&priv->rx_tasklet);
return;


@ -1835,13 +1835,13 @@ fail:
return -EINVAL;
}
u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur)
u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
{
struct timespec ts;
struct timespec64 ts;
s64 usec;
if (!cur) {
getrawmonotonic(&ts);
ktime_get_raw_ts64(&ts);
cur = &ts;
}
@ -1859,7 +1859,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
struct timespec tsf_ts;
struct timespec64 tsf_ts;
u32 tsf_offset;
u64 tsf = 0;
int r;
@ -1905,7 +1905,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* Save TSF before chip reset, a cold reset clears it */
getrawmonotonic(&tsf_ts);
ktime_get_raw_ts64(&tsf_ts);
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &


@ -1060,7 +1060,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur);
u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);


@ -1865,7 +1865,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
tsf -= le64_to_cpu(avp->tsf_adjust);
getrawmonotonic(&avp->chanctx->tsf_ts);
ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_settsf64(sc->sc_ah, tsf);
avp->chanctx->tsf_val = tsf;
@ -1881,7 +1881,7 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
getrawmonotonic(&avp->chanctx->tsf_ts);
ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_reset_tsf(sc->sc_ah);
avp->chanctx->tsf_val = 0;


@ -209,6 +209,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
{
struct wmi *wmi = priv;
struct wmi_cmd_hdr *hdr;
unsigned long flags;
u16 cmd_id;
if (unlikely(wmi->stopped))
@ -218,20 +219,20 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
cmd_id = be16_to_cpu(hdr->command_id);
if (cmd_id & 0x1000) {
spin_lock(&wmi->wmi_lock);
spin_lock_irqsave(&wmi->wmi_lock, flags);
__skb_queue_tail(&wmi->wmi_event_queue, skb);
spin_unlock(&wmi->wmi_lock);
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
tasklet_schedule(&wmi->wmi_event_tasklet);
return;
}
/* Check if there has been a timeout. */
spin_lock(&wmi->wmi_lock);
spin_lock_irqsave(&wmi->wmi_lock, flags);
if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
spin_unlock(&wmi->wmi_lock);
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
goto free_skb;
}
spin_unlock(&wmi->wmi_lock);
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
/* WMI command response */
ath9k_wmi_rsp_callback(wmi, skb);


@ -493,7 +493,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
struct wcn36xx_sta *sta_priv = sta ? wcn36xx_sta_to_priv(sta) : NULL;
int ret = 0;
u8 key[WLAN_MAX_KEY_LEN];
@ -512,7 +512,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
break;
case WLAN_CIPHER_SUITE_CCMP:
vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
@ -567,15 +567,19 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_conf->keyidx,
key_conf->keylen,
key);
if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
(WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
sta_priv->is_data_encrypted = true;
wcn36xx_smd_set_stakey(wcn,
vif_priv->encrypt_type,
key_conf->keyidx,
key_conf->keylen,
key,
get_sta_index(vif, sta_priv));
list_for_each_entry(sta_priv,
&vif_priv->sta_list, list) {
sta_priv->is_data_encrypted = true;
wcn36xx_smd_set_stakey(wcn,
vif_priv->encrypt_type,
key_conf->keyidx,
key_conf->keylen,
key,
get_sta_index(vif, sta_priv));
}
}
}
break;
@ -984,6 +988,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
INIT_LIST_HEAD(&vif_priv->sta_list);
list_add(&vif_priv->list, &wcn->vif_list);
wcn36xx_smd_add_sta_self(wcn, vif);
@ -1005,6 +1010,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
spin_lock_init(&sta_priv->ampdu_lock);
sta_priv->vif = vif_priv;
list_add(&sta_priv->list, &vif_priv->sta_list);
/*
* For STA mode HW will be configured on BSS_CHANGED_ASSOC because
* at this stage AID is not available yet.
@ -1032,6 +1039,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
list_del(&sta_priv->list);
wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
sta_priv->vif = NULL;
@ -1153,8 +1161,6 @@ static const struct ieee80211_ops wcn36xx_ops = {
static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
{
int ret = 0;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@ -1201,7 +1207,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
wiphy_ext_feature_set(wcn->hw->wiphy,
NL80211_EXT_FEATURE_CQM_RSSI_LIST);
return ret;
return 0;
}
static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,


@ -250,7 +250,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
int ret = 0;
int ret;
unsigned long start;
struct wcn36xx_hal_msg_header *hdr =
(struct wcn36xx_hal_msg_header *)wcn->hal_buf;
@ -446,7 +446,7 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
int wcn36xx_smd_start(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
int ret = 0;
int ret;
int i;
size_t len;
@ -493,7 +493,7 @@ out:
int wcn36xx_smd_stop(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_stop_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
@ -520,7 +520,7 @@ out:
int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
{
struct wcn36xx_hal_init_scan_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
@ -549,7 +549,7 @@ out:
int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_start_scan_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
@ -579,7 +579,7 @@ out:
int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_end_scan_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
@ -610,7 +610,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
enum wcn36xx_hal_sys_mode mode)
{
struct wcn36xx_hal_finish_scan_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
@ -732,7 +732,7 @@ out:
static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
int ret = 0;
int ret;
ret = wcn36xx_smd_rsp_status_check(buf, len);
if (ret)
@ -747,7 +747,7 @@ int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
struct ieee80211_vif *vif, int ch)
{
struct wcn36xx_hal_switch_channel_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
@ -860,7 +860,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn,
u8 *channels, size_t channel_count)
{
struct wcn36xx_hal_update_scan_params_req_ex msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
@ -931,7 +931,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_add_sta_self_req msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
@ -965,7 +965,7 @@ out:
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
{
struct wcn36xx_hal_del_sta_self_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
@ -993,7 +993,7 @@ out:
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_delete_sta_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
@ -1040,7 +1040,7 @@ static int wcn36xx_smd_join_rsp(void *buf, size_t len)
int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
{
struct wcn36xx_hal_join_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
@ -1089,7 +1089,7 @@ int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
enum wcn36xx_hal_link_state state)
{
struct wcn36xx_hal_set_link_state_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
@ -1215,7 +1215,7 @@ int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
{
struct wcn36xx_hal_config_sta_req_msg msg;
struct wcn36xx_hal_config_sta_params *sta_params;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
@ -1414,7 +1414,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct wcn36xx_hal_config_bss_params *bss;
struct wcn36xx_hal_config_sta_params *sta_params;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
@ -1579,7 +1579,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
u16 p2p_off)
{
struct wcn36xx_hal_send_beacon_req_msg msg_body;
int ret = 0, pad, pvm_len;
int ret, pad, pvm_len;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
@ -1653,7 +1653,7 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
struct sk_buff *skb)
{
struct wcn36xx_hal_send_probe_resp_req_msg msg;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
@ -1700,7 +1700,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_set_sta_key_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
@ -1708,12 +1708,20 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
msg_body.set_sta_key_params.sta_index = sta_index;
msg_body.set_sta_key_params.enc_type = enc_type;
msg_body.set_sta_key_params.key[0].id = keyidx;
msg_body.set_sta_key_params.key[0].unicast = 1;
msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
msg_body.set_sta_key_params.key[0].pae_role = 0;
msg_body.set_sta_key_params.key[0].length = keylen;
memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
if (enc_type == WCN36XX_HAL_ED_WEP104 ||
enc_type == WCN36XX_HAL_ED_WEP40) {
/* Use bss key for wep (static) */
msg_body.set_sta_key_params.def_wep_idx = keyidx;
msg_body.set_sta_key_params.wep_type = 0;
} else {
msg_body.set_sta_key_params.key[0].id = keyidx;
msg_body.set_sta_key_params.key[0].unicast = 1;
msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
msg_body.set_sta_key_params.key[0].pae_role = 0;
msg_body.set_sta_key_params.key[0].length = keylen;
memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
}
msg_body.set_sta_key_params.single_tid_rc = 1;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@ -1741,7 +1749,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
u8 *key)
{
struct wcn36xx_hal_set_bss_key_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
@ -1778,7 +1786,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
@ -1810,7 +1818,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
u8 keyidx)
{
struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
@ -1839,7 +1847,7 @@ int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_enter_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
@ -1869,7 +1877,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_exit_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
@ -1895,7 +1903,7 @@ out:
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
@ -1930,7 +1938,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
{
struct wcn36xx_hal_keep_alive_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
@ -1968,7 +1976,7 @@ int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5)
{
struct wcn36xx_hal_dump_cmd_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
@ -2013,7 +2021,6 @@ void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
int ret = 0;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
@ -2022,8 +2029,8 @@ int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
arr_idx = cap / 32;
bit_idx = cap % 32;
ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
return ret;
return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
}
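get_feat_caps() above boils down to a bit test in an array of 32-bit words: cap / 32 selects the word and cap % 32 the bit inside it. A standalone sketch of the same indexing (illustration only, helper names made up):

#include <stdio.h>
#include <stdint.h>

static int test_cap(const uint32_t *bitmap, unsigned int cap)
{
	unsigned int arr_idx = cap / 32;	/* which 32-bit word */
	unsigned int bit_idx = cap % 32;	/* which bit inside that word */

	return (bitmap[arr_idx] & (1u << bit_idx)) ? 1 : 0;
}

int main(void)
{
	uint32_t caps[4] = { 0 };		/* room for capabilities 0..127 */

	caps[45 / 32] |= 1u << (45 % 32);	/* advertise capability 45 */
	printf("cap 45: %d, cap 46: %d\n",
	       test_cap(caps, 45), test_cap(caps, 46));
	return 0;
}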
void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
@ -2043,7 +2050,7 @@ void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
int ret = 0, i;
int ret, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@ -2079,7 +2086,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_add_ba_session_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
@ -2117,7 +2124,7 @@ out:
int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
{
struct wcn36xx_hal_add_ba_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
@ -2145,7 +2152,7 @@ out:
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
{
struct wcn36xx_hal_del_ba_req_msg msg_body;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
@ -2185,7 +2192,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
@ -2364,7 +2371,7 @@ int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
{
struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
size_t len;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
@ -2399,7 +2406,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
int ret = 0;
int ret;
mutex_lock(&wcn->hal_mutex);


@ -129,6 +129,8 @@ struct wcn36xx_vif {
u8 self_sta_index;
u8 self_dpu_desc_index;
u8 self_ucast_dpu_sign;
struct list_head sta_list;
};
/**
@ -154,6 +156,7 @@ struct wcn36xx_vif {
* |______________|_____________|_______________|
*/
struct wcn36xx_sta {
struct list_head list;
struct wcn36xx_vif *vif;
u16 aid;
u16 tid;


@ -9,6 +9,7 @@ wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
wil6210-y += txrx_edma.o
wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += fw.o


@ -1726,7 +1726,7 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int authorize;
int cid, i;
struct vring_tx_data *txdata = NULL;
struct wil_ring_tx_data *txdata = NULL;
wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n",
mac, params->sta_flags_mask, params->sta_flags_set,
@ -1746,20 +1746,20 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
return -ENOLINK;
}
for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++)
if (wil->vring2cid_tid[i][0] == cid) {
txdata = &wil->vring_tx_data[i];
for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++)
if (wil->ring2cid_tid[i][0] == cid) {
txdata = &wil->ring_tx_data[i];
break;
}
if (!txdata) {
wil_err(wil, "vring data not found\n");
wil_err(wil, "ring data not found\n");
return -ENOLINK;
}
authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
txdata->dot1x_open = authorize ? 1 : 0;
wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i,
wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i,
txdata->dot1x_open);
return 0;


@ -29,7 +29,10 @@
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
static u32 dbg_txdesc_index;
static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */
static u32 dbg_status_msg_index;
/* 0..wil->num_rx_status_rings-1 for Rx, wil->tx_sring_idx for Tx */
static u32 dbg_sring_index;
enum dbg_off_type {
doff_u32 = 0,
@ -47,20 +50,53 @@ struct dbg_off {
enum dbg_off_type type;
};
static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
const char *name, struct vring *vring,
char _s, char _h)
static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
struct wil_ring *ring,
char _s, char _h, int idx)
{
void __iomem *x = wmi_addr(wil, vring->hwtail);
u8 num_of_descs;
bool has_skb = false;
if (ring->is_rx) {
struct wil_rx_enhanced_desc *rx_d =
(struct wil_rx_enhanced_desc *)
&ring->va[idx].rx.enhanced;
u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
seq_printf(s, "%c", (has_skb) ? _h : _s);
} else {
struct wil_tx_enhanced_desc *d =
(struct wil_tx_enhanced_desc *)
&ring->va[idx].tx.enhanced;
num_of_descs = (u8)d->mac.d[2];
has_skb = ring->ctx[idx].skb;
if (num_of_descs >= 1)
seq_printf(s, "%c", ring->ctx[idx].skb ? _h : _s);
else
/* num_of_descs == 0, it's a frag in a list of descs */
seq_printf(s, "%c", has_skb ? 'h' : _s);
}
}
static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
const char *name, struct wil_ring *ring,
char _s, char _h)
{
void __iomem *x = wmi_addr(wil, ring->hwtail);
u32 v;
seq_printf(s, "VRING %s = {\n", name);
seq_printf(s, " pa = %pad\n", &vring->pa);
seq_printf(s, " va = 0x%p\n", vring->va);
seq_printf(s, " size = %d\n", vring->size);
seq_printf(s, " swtail = %d\n", vring->swtail);
seq_printf(s, " swhead = %d\n", vring->swhead);
seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
seq_printf(s, "RING %s = {\n", name);
seq_printf(s, " pa = %pad\n", &ring->pa);
seq_printf(s, " va = 0x%p\n", ring->va);
seq_printf(s, " size = %d\n", ring->size);
if (wil->use_enhanced_dma_hw && ring->is_rx)
seq_printf(s, " swtail = %u\n", *ring->edma_rx_swtail.va);
else
seq_printf(s, " swtail = %d\n", ring->swtail);
seq_printf(s, " swhead = %d\n", ring->swhead);
seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail);
if (x) {
v = readl(x);
seq_printf(s, "0x%08x = %d\n", v, v);
@ -68,41 +104,45 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "???\n");
}
if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
for (i = 0; i < vring->size; i++) {
volatile struct vring_tx_desc *d = &vring->va[i].tx;
if ((i % 128) == 0 && (i != 0))
for (i = 0; i < ring->size; i++) {
if ((i % 128) == 0 && i != 0)
seq_puts(s, "\n");
seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
_s : (vring->ctx[i].skb ? _h : 'h'));
if (wil->use_enhanced_dma_hw) {
wil_print_desc_edma(s, wil, ring, _s, _h, i);
} else {
volatile struct vring_tx_desc *d =
&ring->va[i].tx.legacy;
seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
_s : (ring->ctx[i].skb ? _h : 'h'));
}
}
seq_puts(s, "\n");
}
seq_puts(s, "}\n");
}
static int wil_vring_debugfs_show(struct seq_file *s, void *data)
static int wil_ring_debugfs_show(struct seq_file *s, void *data)
{
uint i;
struct wil6210_priv *wil = s->private;
wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
wil_print_ring(s, wil, "rx", &wil->ring_rx, 'S', '_');
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
struct vring *vring = &wil->vring_tx[i];
struct vring_tx_data *txdata = &wil->vring_tx_data[i];
for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
struct wil_ring *ring = &wil->ring_tx[i];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
if (vring->va) {
int cid = wil->vring2cid_tid[i][0];
int tid = wil->vring2cid_tid[i][1];
u32 swhead = vring->swhead;
u32 swtail = vring->swtail;
int used = (vring->size + swhead - swtail)
% vring->size;
int avail = vring->size - used - 1;
if (ring->va) {
int cid = wil->ring2cid_tid[i][0];
int tid = wil->ring2cid_tid[i][1];
u32 swhead = ring->swhead;
u32 swtail = ring->swtail;
int used = (ring->size + swhead - swtail)
% ring->size;
int avail = ring->size - used - 1;
char name[10];
char sidle[10];
/* performance monitoring */
@ -137,20 +177,88 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
txdata->dot1x_open ? "+" : "-",
used, avail, sidle);
wil_print_vring(s, wil, name, vring, '_', 'H');
wil_print_ring(s, wil, name, ring, '_', 'H');
}
}
return 0;
}
static int wil_vring_seq_open(struct inode *inode, struct file *file)
static int wil_ring_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_vring_debugfs_show, inode->i_private);
return single_open(file, wil_ring_debugfs_show, inode->i_private);
}
static const struct file_operations fops_vring = {
.open = wil_vring_seq_open,
static const struct file_operations fops_ring = {
.open = wil_ring_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
};
static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
struct wil_status_ring *sring)
{
void __iomem *x = wmi_addr(wil, sring->hwtail);
int sring_idx = sring - wil->srings;
u32 v;
seq_printf(s, "Status Ring %s [ %d ] = {\n",
sring->is_rx ? "RX" : "TX", sring_idx);
seq_printf(s, " pa = %pad\n", &sring->pa);
seq_printf(s, " va = 0x%pK\n", sring->va);
seq_printf(s, " size = %d\n", sring->size);
seq_printf(s, " elem_size = %zu\n", sring->elem_size);
seq_printf(s, " swhead = %d\n", sring->swhead);
seq_printf(s, " hwtail = [0x%08x] -> ", sring->hwtail);
if (x) {
v = readl_relaxed(x);
seq_printf(s, "0x%08x = %d\n", v, v);
} else {
seq_puts(s, "???\n");
}
seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
for (i = 0; i < sring->size; i++) {
u32 *sdword_0 =
(u32 *)(sring->va + (sring->elem_size * i));
if ((i % 128) == 0 && i != 0)
seq_puts(s, "\n");
if (i == sring->swhead)
seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
'X' : 'x');
else
seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
'1' : '0');
}
seq_puts(s, "\n");
}
seq_puts(s, "}\n");
}
static int wil_srings_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
int i = 0;
for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++)
if (wil->srings[i].va)
wil_print_sring(s, wil, &wil->srings[i]);
return 0;
}
static int wil_srings_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_srings_debugfs_show, inode->i_private);
}
static const struct file_operations fops_srings = {
.open = wil_srings_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
@ -162,8 +270,8 @@ static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
}
static void wil_print_ring(struct seq_file *s, const char *prefix,
void __iomem *off)
static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
void __iomem *off)
{
struct wil6210_priv *wil = s->private;
struct wil6210_mbox_ring r;
@ -249,9 +357,9 @@ static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
if (ret < 0)
return ret;
wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
wil_print_mbox_ring(s, "tx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx));
wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
wil_print_mbox_ring(s, "rx", wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx));
wil_pm_runtime_put(wil);
@ -719,13 +827,13 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
if ((strcmp(cmd, "add") == 0) ||
(strcmp(cmd, "del_tx") == 0)) {
struct vring_tx_data *txdata;
struct wil_ring_tx_data *txdata;
if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) {
wil_err(wil, "BACK: invalid ring id %d\n", p1);
return -EINVAL;
}
txdata = &wil->vring_tx_data[p1];
txdata = &wil->ring_tx_data[p1];
if (strcmp(cmd, "add") == 0) {
if (rc < 3) {
wil_err(wil, "BACK: add require at least 2 params\n");
@ -972,55 +1080,94 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct vring *vring;
bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
struct wil_ring *ring;
bool tx;
int ring_idx = dbg_ring_index;
int txdesc_idx = dbg_txdesc_index;
volatile struct vring_tx_desc *d;
volatile u32 *u;
struct sk_buff *skb;
vring = tx ? &wil->vring_tx[dbg_vring_index] : &wil->vring_rx;
if (wil->use_enhanced_dma_hw) {
/* RX ring index == 0 */
if (ring_idx >= WIL6210_MAX_TX_RINGS) {
seq_printf(s, "invalid ring index %d\n", ring_idx);
return 0;
}
tx = ring_idx > 0; /* desc ring 0 is reserved for RX */
} else {
/* RX ring index == WIL6210_MAX_TX_RINGS */
if (ring_idx > WIL6210_MAX_TX_RINGS) {
seq_printf(s, "invalid ring index %d\n", ring_idx);
return 0;
}
tx = (ring_idx < WIL6210_MAX_TX_RINGS);
}
if (!vring->va) {
ring = tx ? &wil->ring_tx[ring_idx] : &wil->ring_rx;
if (!ring->va) {
if (tx)
seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
seq_printf(s, "No Tx[%2d] RING\n", ring_idx);
else
seq_puts(s, "No Rx VRING\n");
seq_puts(s, "No Rx RING\n");
return 0;
}
if (dbg_txdesc_index < vring->size) {
/* use struct vring_tx_desc for Rx as well,
* only field used, .dma.length, is the same
*/
volatile struct vring_tx_desc *d =
&vring->va[dbg_txdesc_index].tx;
volatile u32 *u = (volatile u32 *)d;
struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
if (tx)
seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
dbg_txdesc_index);
else
seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[0], u[1], u[2], u[3]);
seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[4], u[5], u[6], u[7]);
seq_printf(s, " SKB = 0x%p\n", skb);
if (skb) {
skb_get(skb);
wil_seq_print_skb(s, skb);
kfree_skb(skb);
}
seq_puts(s, "}\n");
} else {
if (txdesc_idx >= ring->size) {
if (tx)
seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
dbg_vring_index, dbg_txdesc_index,
vring->size);
ring_idx, txdesc_idx, ring->size);
else
seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
dbg_txdesc_index, vring->size);
txdesc_idx, ring->size);
return 0;
}
/* use struct vring_tx_desc for Rx as well,
* only field used, .dma.length, is the same
*/
d = &ring->va[txdesc_idx].tx.legacy;
u = (volatile u32 *)d;
skb = NULL;
if (wil->use_enhanced_dma_hw) {
if (tx) {
skb = ring->ctx[txdesc_idx].skb;
} else {
struct wil_rx_enhanced_desc *rx_d =
(struct wil_rx_enhanced_desc *)
&ring->va[txdesc_idx].rx.enhanced;
u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
if (!wil_val_in_range(buff_id, 0,
wil->rx_buff_mgmt.size)) {
seq_printf(s, "invalid buff_id %d\n", buff_id);
return 0;
}
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
}
} else {
skb = ring->ctx[txdesc_idx].skb;
}
if (tx)
seq_printf(s, "Tx[%2d][%3d] = {\n", ring_idx,
txdesc_idx);
else
seq_printf(s, "Rx[%3d] = {\n", txdesc_idx);
seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[0], u[1], u[2], u[3]);
seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[4], u[5], u[6], u[7]);
seq_printf(s, " SKB = 0x%p\n", skb);
if (skb) {
skb_get(skb);
wil_seq_print_skb(s, skb);
kfree_skb(skb);
}
seq_puts(s, "}\n");
return 0;
}
@ -1036,6 +1183,115 @@ static const struct file_operations fops_txdesc = {
.llseek = seq_lseek,
};
/*---------Tx/Rx status message------------*/
static int wil_status_msg_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
int sring_idx = dbg_sring_index;
struct wil_status_ring *sring;
bool tx = sring_idx == wil->tx_sring_idx ? 1 : 0;
u32 status_msg_idx = dbg_status_msg_index;
u32 *u;
if (sring_idx >= WIL6210_MAX_STATUS_RINGS) {
seq_printf(s, "invalid status ring index %d\n", sring_idx);
return 0;
}
sring = &wil->srings[sring_idx];
if (!sring->va) {
seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R');
return 0;
}
if (status_msg_idx >= sring->size) {
seq_printf(s, "%cxDesc index (%d) >= size (%d)\n",
tx ? 'T' : 'R', status_msg_idx, sring->size);
return 0;
}
u = sring->va + (sring->elem_size * status_msg_idx);
seq_printf(s, "%cx[%d][%3d] = {\n",
tx ? 'T' : 'R', sring_idx, status_msg_idx);
seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[0], u[1], u[2], u[3]);
if (!tx && !wil->use_compressed_rx_status)
seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[4], u[5], u[6], u[7]);
seq_puts(s, "}\n");
return 0;
}
static int wil_status_msg_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_status_msg_debugfs_show,
inode->i_private);
}
static const struct file_operations fops_status_msg = {
.open = wil_status_msg_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
};
static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
{
struct wil_rx_buff *it;
int i = 0;
list_for_each_entry(it, lh, list) {
if ((i % 16) == 0 && i != 0)
seq_puts(s, "\n ");
seq_printf(s, "[%4d] ", it->id);
i++;
}
seq_printf(s, "\nNumber of buffers: %u\n", i);
return i;
}
static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
int num_active;
int num_free;
seq_printf(s, " size = %zu\n", rbm->size);
seq_printf(s, " free_list_empty_cnt = %lu\n",
rbm->free_list_empty_cnt);
/* Print active list */
seq_puts(s, " Active list:\n");
num_active = wil_print_rx_buff(s, &rbm->active);
seq_puts(s, "\n Free list:\n");
num_free = wil_print_rx_buff(s, &rbm->free);
seq_printf(s, " Total number of buffers: %u\n",
num_active + num_free);
return 0;
}
static int wil_rx_buff_mgmt_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_rx_buff_mgmt_debugfs_show,
inode->i_private);
}
static const struct file_operations fops_rx_buff_mgmt = {
.open = wil_rx_buff_mgmt_seq_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
};
/*---------beamforming------------*/
static char *wil_bfstatus_str(u32 status)
{
@ -1478,6 +1734,13 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
p->stats.rx_large_frame,
p->stats.rx_replay);
if (wil->use_enhanced_dma_hw)
seq_printf(s,
"mic error %lu, key error %lu, amsdu error %lu\n",
p->stats.rx_mic_error,
p->stats.rx_key_error,
p->stats.rx_amsdu_error);
seq_puts(s, "Rx/MCS:");
for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
mcs++)
@ -1760,6 +2023,60 @@ static const struct file_operations fops_suspend_stats = {
.open = simple_open,
};
/*---------compressed_rx_status---------*/
static ssize_t wil_compressed_rx_status_write(struct file *file,
const char __user *buf,
size_t len, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct wil6210_priv *wil = s->private;
int compressed_rx_status;
int rc;
rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status);
if (rc) {
wil_err(wil, "Invalid argument\n");
return rc;
}
if (wil_has_active_ifaces(wil, true, false)) {
wil_err(wil, "cannot change edma config after iface is up\n");
return -EPERM;
}
wil_info(wil, "%sable compressed_rx_status\n",
compressed_rx_status ? "En" : "Dis");
wil->use_compressed_rx_status = compressed_rx_status;
return len;
}
static int
wil_compressed_rx_status_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
seq_printf(s, "%d\n", wil->use_compressed_rx_status);
return 0;
}
static int
wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_compressed_rx_status_show,
inode->i_private);
}
static const struct file_operations fops_compressed_rx_status = {
.open = wil_compressed_rx_status_seq_open,
.release = single_release,
.read = seq_read,
.write = wil_compressed_rx_status_write,
.llseek = seq_lseek,
};
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@ -1790,7 +2107,7 @@ static const struct {
const struct file_operations *fops;
} dbg_files[] = {
{"mbox", 0444, &fops_mbox},
{"vrings", 0444, &fops_vring},
{"rings", 0444, &fops_ring},
{"stations", 0444, &fops_sta},
{"mids", 0444, &fops_mids},
{"desc", 0444, &fops_txdesc},
@ -1813,6 +2130,10 @@ static const struct {
{"fw_capabilities", 0444, &fops_fw_capabilities},
{"fw_version", 0444, &fops_fw_version},
{"suspend_stats", 0644, &fops_suspend_stats},
{"compressed_rx_status", 0644, &fops_compressed_rx_status},
{"srings", 0444, &fops_srings},
{"status_msg", 0444, &fops_status_msg},
{"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@ -1858,7 +2179,12 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(chip_revision, 0444, doff_u8),
WIL_FIELD(abft_len, 0644, doff_u8),
WIL_FIELD(wakeup_trigger, 0644, doff_u8),
WIL_FIELD(vring_idle_trsh, 0644, doff_u32),
WIL_FIELD(ring_idle_trsh, 0644, doff_u32),
WIL_FIELD(num_rx_status_rings, 0644, doff_u8),
WIL_FIELD(rx_status_ring_order, 0644, doff_u32),
WIL_FIELD(tx_status_ring_order, 0644, doff_u32),
WIL_FIELD(rx_buff_id_count, 0644, doff_u32),
WIL_FIELD(amsdu_en, 0644, doff_u8),
{},
};
@ -1872,9 +2198,11 @@ static const struct dbg_off dbg_wil_regs[] = {
/* static parameters */
static const struct dbg_off dbg_statics[] = {
{"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
{"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32},
{"ring_index", 0644, (ulong)&dbg_ring_index, doff_u32},
{"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
{"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
{"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
{"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32},
{},
};


@ -101,7 +101,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
if (ret < 0)
return ret;
wil_configure_interrupt_moderation(wil);
wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);


@ -44,6 +44,8 @@
(~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
#define WIL6210_IMC_TX_EDMA BIT_TX_STATUS_IRQ
#define WIL6210_IMC_RX_EDMA BIT_RX_STATUS_IRQ
#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
ISR_MISC_MBOX_EVT | \
ISR_MISC_FW_ERROR)
@ -87,12 +89,24 @@ static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil)
{
wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
{
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil)
{
wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS),
WIL6210_IRQ_DISABLE);
}
static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
@ -125,6 +139,12 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
WIL6210_IMC_TX);
}
void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil)
{
wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_TX_EDMA);
}
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0;
@ -133,6 +153,12 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
}
void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil)
{
wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC),
WIL6210_IMC_RX_EDMA);
}
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
@ -164,7 +190,9 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
wil6210_mask_irq_tx_edma(wil);
wil6210_mask_irq_rx(wil);
wil6210_mask_irq_rx_edma(wil);
wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@ -179,13 +207,43 @@ void wil_unmask_irq(struct wil6210_priv *wil)
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_MISC_VALUE);
wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_rx(wil);
if (wil->use_enhanced_dma_hw) {
wil6210_unmask_irq_tx_edma(wil);
wil6210_unmask_irq_rx_edma(wil);
} else {
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_rx(wil);
}
wil6210_unmask_irq_misc(wil, true);
}
void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
{
u32 moderation;
wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);
wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);
/* Update RX and TX moderation */
moderation = wil->rx_max_burst_duration |
(WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);
/* Treat special events as regular
* (set bit 0 to 0x1 and clear bits 1-8)
*/
wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
}
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
@ -294,6 +352,97 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
return IRQ_HANDLED;
}
static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
u32 isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
bool need_unmask = true;
trace_wil6210_irq_rx(isr);
wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: RX\n");
return IRQ_NONE;
}
wil6210_mask_irq_rx_edma(wil);
if (likely(isr & BIT_RX_STATUS_IRQ)) {
wil_dbg_irq(wil, "RX status ring\n");
isr &= ~BIT_RX_STATUS_IRQ;
if (likely(test_bit(wil_status_fwready, wil->status))) {
if (likely(test_bit(wil_status_napi_en, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_rx);
} else {
wil_err(wil,
"Got Rx interrupt while stopping interface\n");
}
} else {
wil_err(wil, "Got Rx interrupt while in reset\n");
}
}
if (unlikely(isr))
wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
/* Rx IRQ will be enabled when NAPI processing finished */
atomic_inc(&wil->isr_count_rx);
if (unlikely(need_unmask))
wil6210_unmask_irq_rx_edma(wil);
return IRQ_HANDLED;
}
static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
u32 isr = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
bool need_unmask = true;
trace_wil6210_irq_tx(isr);
wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
if (unlikely(!isr)) {
wil_err(wil, "spurious IRQ: TX\n");
return IRQ_NONE;
}
wil6210_mask_irq_tx_edma(wil);
if (likely(isr & BIT_TX_STATUS_IRQ)) {
wil_dbg_irq(wil, "TX status ring\n");
isr &= ~BIT_TX_STATUS_IRQ;
if (likely(test_bit(wil_status_fwready, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_tx);
} else {
wil_err(wil, "Got Tx status ring IRQ while in reset\n");
}
}
if (unlikely(isr))
wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
/* Tx IRQ will be enabled when NAPI processing finished */
atomic_inc(&wil->isr_count_tx);
if (unlikely(need_unmask))
wil6210_unmask_irq_tx_edma(wil);
return IRQ_HANDLED;
}
static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@ -510,30 +659,53 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
*/
static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
{
u32 icm_rx, icr_rx, imv_rx;
u32 icm_tx, icr_tx, imv_tx;
u32 icm_misc, icr_misc, imv_misc;
if (!test_bit(wil_status_irqen, wil->status)) {
u32 icm_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICM));
u32 icr_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
if (wil->use_enhanced_dma_hw) {
icm_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_RX_ICR) +
offsetof(struct RGF_ICR, ICM));
icr_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR +
offsetof(struct RGF_ICR, IMV));
u32 icm_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICM));
u32 icr_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
icm_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_TX_ICR) +
offsetof(struct RGF_ICR, ICM));
icr_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_INT_GEN_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR +
offsetof(struct RGF_ICR, IMV));
} else {
icm_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICM));
icr_rx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
offsetof(struct RGF_ICR, IMV));
u32 icm_misc = wil_ioread32_and_clear(wil->csr +
icm_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICM));
icr_tx = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
offsetof(struct RGF_ICR, IMV));
}
icm_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICM));
u32 icr_misc = wil_ioread32_and_clear(wil->csr +
icr_misc = wil_ioread32_and_clear(wil->csr +
HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
offsetof(struct RGF_ICR, IMV));
/* HALP interrupt can be unmasked when misc interrupts are
@ -592,11 +764,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
* voting for wake thread - need at least 1 vote
*/
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
(wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
(wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
(wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
(wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
rc = IRQ_WAKE_THREAD;
if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
@ -624,6 +796,10 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
wmb(); /* make sure write completed */
@ -652,6 +828,13 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
if (wil->use_enhanced_dma_hw) {
wil->txrx_ops.irq_tx = wil6210_irq_tx_edma;
wil->txrx_ops.irq_rx = wil6210_irq_rx_edma;
} else {
wil->txrx_ops.irq_tx = wil6210_irq_tx;
wil->txrx_ops.irq_rx = wil6210_irq_rx;
}
rc = request_threaded_irq(irq, wil6210_hardirq,
wil6210_thread_irq,
use_msi ? 0 : IRQF_SHARED,

View File

@ -21,11 +21,13 @@
#include "wil6210.h"
#include "txrx.h"
#include "txrx_edma.h"
#include "wmi.h"
#include "boot_loader.h"
#define WAIT_FOR_HALP_VOTE_MS 100
#define WAIT_FOR_SCAN_ABORT_MS 1000
#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1
bool debug_fw; /* = false; */
module_param(debug_fw, bool, 0444);
@ -110,9 +112,29 @@ MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
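/* Talyn-MB boot configuration, derived in wil_wait_device_ready_talyn_mb()
* from the OTP signature status read out of RGF_USER_OTP_HW_RD_MACHINE_1
*/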
enum {
WIL_BOOT_ERR,
WIL_BOOT_VANILLA,
WIL_BOOT_PRODUCTION,
WIL_BOOT_DEVELOPMENT,
};
enum {
WIL_SIG_STATUS_VANILLA = 0x0,
WIL_SIG_STATUS_DEVELOPMENT = 0x1,
WIL_SIG_STATUS_PRODUCTION = 0x2,
WIL_SIG_STATUS_CORRUPTED_PRODUCTION = 0x3,
};
#define RST_DELAY (20) /* msec, for loop in @wil_wait_device_ready */
#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
#define PMU_READY_DELAY_MS (4) /* ms, for sleep in @wil_wait_device_ready */
#define OTP_HW_DELAY (200) /* usec, loop in @wil_wait_device_ready_talyn_mb */
/* round up to be above 2 ms total */
#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY)
/*
* Due to a hardware issue,
* one has to read/write to/from NIC in 32-bit chunks;
@ -160,6 +182,37 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
}
}
static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
{
struct wil_ring *ring = &wil->ring_tx[id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
lockdep_assert_held(&wil->mutex);
if (!ring->va)
return;
wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->mid = U8_MAX;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
/* napi_synchronize waits for completion of the current NAPI but will
* not prevent the next NAPI run.
* Add a memory barrier to guarantee that txdata->enabled is zeroed
* before napi_synchronize so that the next scheduled NAPI will not
* handle this vring
*/
wmb();
/* make sure NAPI won't touch this vring */
if (test_bit(wil_status_napi_en, wil->status))
napi_synchronize(&wil->napi_tx);
wil->txrx_ops.ring_fini_tx(wil, ring);
}
static void wil_disconnect_cid(struct wil6210_vif *vif, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@ -219,9 +272,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
/* release vrings */
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
if (wil->vring2cid_tid[i][0] == cid)
wil_vring_fini_tx(wil, i);
for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
if (wil->ring2cid_tid[i][0] == cid)
wil_ring_fini_tx(wil, i);
}
/* statistics */
memset(&sta->stats, 0, sizeof(sta->stats));
@ -453,18 +506,19 @@ static void wil_fw_error_worker(struct work_struct *work)
mutex_unlock(&wil->mutex);
}
static int wil_find_free_vring(struct wil6210_priv *wil)
static int wil_find_free_ring(struct wil6210_priv *wil)
{
int i;
int min_ring_id = wil_get_min_tx_ring_id(wil);
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
if (!wil->vring_tx[i].va)
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
if (!wil->ring_tx[i].va)
return i;
}
return -EINVAL;
}
int wil_tx_init(struct wil6210_vif *vif, int cid)
int wil_ring_init_tx(struct wil6210_vif *vif, int cid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc = -EINVAL, ringid;
@ -473,16 +527,17 @@ int wil_tx_init(struct wil6210_vif *vif, int cid)
wil_err(wil, "No connection pending\n");
goto out;
}
ringid = wil_find_free_vring(wil);
ringid = wil_find_free_ring(wil);
if (ringid < 0) {
wil_err(wil, "No free vring found\n");
goto out;
}
wil_dbg_wmi(wil, "Configure for connection CID %d MID %d vring %d\n",
wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n",
cid, vif->mid, ringid);
rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0);
rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order,
cid, 0);
if (rc)
wil_err(wil, "init TX for CID %d MID %d vring %d failed\n",
cid, vif->mid, ringid);
@ -494,19 +549,19 @@ out:
int wil_bcast_init(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int ri = vif->bcast_vring, rc;
int ri = vif->bcast_ring, rc;
if ((ri >= 0) && wil->vring_tx[ri].va)
if (ri >= 0 && wil->ring_tx[ri].va)
return 0;
ri = wil_find_free_vring(wil);
ri = wil_find_free_ring(wil);
if (ri < 0)
return ri;
vif->bcast_vring = ri;
rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order);
vif->bcast_ring = ri;
rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order);
if (rc)
vif->bcast_vring = -1;
vif->bcast_ring = -1;
return rc;
}
@ -514,13 +569,13 @@ int wil_bcast_init(struct wil6210_vif *vif)
void wil_bcast_fini(struct wil6210_vif *vif)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int ri = vif->bcast_vring;
int ri = vif->bcast_ring;
if (ri < 0)
return;
vif->bcast_vring = -1;
wil_vring_fini_tx(wil, ri);
vif->bcast_ring = -1;
wil_ring_fini_tx(wil, ri);
}
void wil_bcast_fini_all(struct wil6210_priv *wil)
@ -548,7 +603,7 @@ int wil_priv_init(struct wil6210_priv *wil)
}
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++)
spin_lock_init(&wil->vring_tx_data[i].lock);
spin_lock_init(&wil->ring_tx_data[i].lock);
mutex_init(&wil->mutex);
mutex_init(&wil->vif_mutex);
@ -589,11 +644,30 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
WMI_WAKEUP_TRIGGER_BCAST;
memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
wil->vring_idle_trsh = 16;
wil->ring_idle_trsh = 16;
wil->reply_mid = U8_MAX;
wil->max_vifs = 1;
/* edma configuration can be updated via debugfs before allocation */
wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
wil->use_compressed_rx_status = true;
wil->use_rx_hw_reordering = true;
wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
/* Rx status ring size should be bigger than the number of RX buffers
* in order to prevent backpressure on the status ring, which may
* cause HW freeze.
*/
wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
/* Number of RX buffer IDs should be bigger than the RX descriptor
* ring size as in HW reorder flow, the HW can consume additional
* buffers before releasing the previous ones.
*/
wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
wil->amsdu_en = 1;
return 0;
out_wmi_wq:
@ -736,14 +810,24 @@ static void wil_bl_prepare_halt(struct wil6210_priv *wil)
static inline void wil_halt_cpu(struct wil6210_priv *wil)
{
wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
if (wil->hw_version >= HW_VER_TALYN_MB) {
wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB,
BIT_USER_USER_CPU_MAN_RST);
wil_w(wil, RGF_USER_MAC_CPU_0_TALYN_MB,
BIT_USER_MAC_CPU_MAN_RST);
} else {
wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
}
}
static inline void wil_release_cpu(struct wil6210_priv *wil)
{
/* Start CPU */
wil_w(wil, RGF_USER_USER_CPU_0, 1);
if (wil->hw_version >= HW_VER_TALYN_MB)
wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB, 1);
else
wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
@ -767,11 +851,146 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
}
}
static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
static int wil_wait_device_ready(struct wil6210_priv *wil, int no_flash)
{
int delay = 0;
u32 x, x1 = 0;
/* wait until device ready. */
if (no_flash) {
msleep(PMU_READY_DELAY_MS);
wil_dbg_misc(wil, "Reset completed\n");
} else {
do {
msleep(RST_DELAY);
x = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v0,
boot_loader_ready));
if (x1 != x) {
wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
x1, x);
x1 = x;
}
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
x);
return -ETIME;
}
} while (x != BL_READY);
wil_dbg_misc(wil, "Reset completed in %d ms\n",
delay * RST_DELAY);
}
return 0;
}
static int wil_wait_device_ready_talyn_mb(struct wil6210_priv *wil)
{
u32 otp_hw;
u8 signature_status;
bool otp_signature_err;
bool hw_section_done;
u32 otp_qc_secured;
int delay = 0;
/* Wait for OTP signature test to complete */
usleep_range(2000, 2200);
wil->boot_config = WIL_BOOT_ERR;
/* Poll until OTP signature status is valid.
* In vanilla and development modes, when signature test is complete
* HW sets BIT_OTP_SIGNATURE_ERR_TALYN_MB.
* In production mode BIT_OTP_SIGNATURE_ERR_TALYN_MB remains 0, poll
* for signature status change to 2 or 3.
*/
do {
otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
signature_status = WIL_GET_BITS(otp_hw, 8, 9);
otp_signature_err = otp_hw & BIT_OTP_SIGNATURE_ERR_TALYN_MB;
if (otp_signature_err &&
signature_status == WIL_SIG_STATUS_VANILLA) {
wil->boot_config = WIL_BOOT_VANILLA;
break;
}
if (otp_signature_err &&
signature_status == WIL_SIG_STATUS_DEVELOPMENT) {
wil->boot_config = WIL_BOOT_DEVELOPMENT;
break;
}
if (!otp_signature_err &&
signature_status == WIL_SIG_STATUS_PRODUCTION) {
wil->boot_config = WIL_BOOT_PRODUCTION;
break;
}
if (!otp_signature_err &&
signature_status ==
WIL_SIG_STATUS_CORRUPTED_PRODUCTION) {
/* Unrecognized OTP signature found. Possibly a
* corrupted production signature, access control
* is applied as in production mode, therefore
* do not fail
*/
wil->boot_config = WIL_BOOT_PRODUCTION;
break;
}
if (delay++ > OTP_HW_COUNT)
break;
usleep_range(OTP_HW_DELAY, OTP_HW_DELAY + 10);
} while (!otp_signature_err && signature_status == 0);
if (wil->boot_config == WIL_BOOT_ERR) {
wil_err(wil,
"invalid boot config, signature_status %d otp_signature_err %d\n",
signature_status, otp_signature_err);
return -ETIME;
}
wil_dbg_misc(wil,
"signature test done in %d usec, otp_hw 0x%x, boot_config %d\n",
delay * OTP_HW_DELAY, otp_hw, wil->boot_config);
if (wil->boot_config == WIL_BOOT_VANILLA)
/* Assuming not SPI boot (currently not supported) */
goto out;
hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
delay = 0;
while (!hw_section_done) {
msleep(RST_DELAY);
otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
if (delay++ > RST_COUNT) {
wil_err(wil, "TO waiting for hw_section_done\n");
return -ETIME;
}
}
wil_dbg_misc(wil, "HW section done in %d ms\n", delay * RST_DELAY);
otp_qc_secured = wil_r(wil, RGF_OTP_QC_SECURED);
wil->secured_boot = otp_qc_secured & BIT_BOOT_FROM_ROM ? 1 : 0;
wil_dbg_misc(wil, "secured boot is %sabled\n",
wil->secured_boot ? "en" : "dis");
out:
wil_dbg_misc(wil, "Reset completed\n");
return 0;
}
static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
{
u32 x;
int rc;
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
/* Clear MAC link up */
@ -811,10 +1030,17 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
if (wil->hw_version >= HW_VER_TALYN_MB) {
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x7e000000);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0xc00000f0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
} else {
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xfe000000);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
}
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
@ -830,34 +1056,12 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
/* wait until device ready. typical time is 20..80 msec */
if (no_flash)
do {
msleep(RST_DELAY);
x = wil_r(wil, USER_EXT_USER_PMU_3);
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
x);
return -ETIME;
}
} while ((x & BIT_PMU_DEVICE_RDY) == 0);
if (wil->hw_version == HW_VER_TALYN_MB)
rc = wil_wait_device_ready_talyn_mb(wil);
else
do {
msleep(RST_DELAY);
x = wil_r(wil, RGF_USER_BL +
offsetof(struct bl_dedicated_registers_v0,
boot_loader_ready));
if (x1 != x) {
wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
x1, x);
x1 = x;
}
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
x);
return -ETIME;
}
} while (x != BL_READY);
rc = wil_wait_device_ready(wil, no_flash);
if (rc)
return rc;
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@ -865,7 +1069,7 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
if (no_flash) {
if (wil->hw_version < HW_VER_TALYN_MB && no_flash) {
/* Reset OTP HW vectors to fit 40MHz */
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
@ -880,7 +1084,6 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
}
wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
return 0;
}
@ -1042,8 +1245,14 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
struct net_device *ndev = wil->main_ndev;
struct wiphy *wiphy = wil_to_wiphy(wil);
u8 mac[8];
int mac_addr;
wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC),
if (wil->hw_version >= HW_VER_TALYN_MB)
mac_addr = RGF_OTP_MAC_TALYN_MB;
else
mac_addr = RGF_OTP_MAC;
wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
sizeof(mac));
if (!is_valid_ether_addr(mac)) {
wil_err(wil, "Invalid MAC %pM\n", mac);
@ -1147,8 +1356,13 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
/* it is W1C, clear by writing back same value */
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
/* clear PAL_UNIT_ICR (potential D0->D3 leftover) */
wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0);
/* clear PAL_UNIT_ICR (potential D0->D3 leftover)
* In Talyn-MB the host cannot access this register due to
* access control, hence PAL_UNIT_ICR is cleared by the FW
*/
if (wil->hw_version < HW_VER_TALYN_MB)
wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR),
0);
if (wil->fw_calib_result > 0) {
__le32 val = cpu_to_le32(wil->fw_calib_result |
@ -1284,7 +1498,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil_target_reset(wil, no_flash);
wil6210_clear_irq(wil);
wil_enable_irq(wil);
wil_rx_fini(wil);
wil->txrx_ops.rx_fini(wil);
wil->txrx_ops.tx_fini(wil);
if (rc) {
if (!no_flash)
wil_bl_crash_info(wil, true);
@ -1337,7 +1552,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
clear_bit(wil_status_resetting, wil->status);
if (load_fw) {
wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
/* we just started MAC, wait for FW ready */
@ -1352,6 +1566,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
wil->txrx_ops.configure_interrupt_moderation(wil);
rc = wil_restore_vifs(wil);
if (rc) {
wil_err(wil, "failed to restore vifs, rc %d\n", rc);
@ -1406,8 +1622,12 @@ int __wil_up(struct wil6210_priv *wil)
if (rc)
return rc;
/* Rx VRING. After MAC and beacon */
rc = wil_rx_init(wil, 1 << rx_ring_order);
/* Rx RING. After MAC and beacon */
rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
if (rc)
return rc;
rc = wil->txrx_ops.tx_init(wil);
if (rc)
return rc;
@ -1568,3 +1788,11 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_unlock(&wil->halp.lock);
}
void wil_init_txrx_ops(struct wil6210_priv *wil)
{
if (wil->use_enhanced_dma_hw)
wil_init_txrx_ops_edma(wil);
else
wil_init_txrx_ops_legacy_dma(wil);
}

View File

@ -120,6 +120,27 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
return done;
}
static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
napi_rx);
int quota = budget;
int done;
wil_rx_handle_edma(wil, &quota);
done = budget - quota;
if (done < budget) {
napi_complete_done(napi, done);
wil6210_unmask_irq_rx_edma(wil);
wil_dbg_txrx(wil, "NAPI RX complete\n");
}
wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
return done;
}
static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
@ -129,11 +150,11 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
/* always process ALL Tx complete, regardless budget - it is fast */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
struct vring *vring = &wil->vring_tx[i];
struct vring_tx_data *txdata = &wil->vring_tx_data[i];
struct wil_ring *ring = &wil->ring_tx[i];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
struct wil6210_vif *vif;
if (!vring->va || !txdata->enabled ||
if (!ring->va || !txdata->enabled ||
txdata->mid >= wil->max_vifs)
continue;
@ -157,6 +178,30 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
return min(tx_done, budget);
}
static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
napi_tx);
int tx_done;
/* There is only one status TX ring */
struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
if (!sring->va)
return 0;
tx_done = wil_tx_sring_handler(wil, sring);
if (tx_done < budget) {
napi_complete(napi);
wil6210_unmask_irq_tx_edma(wil);
wil_dbg_txrx(wil, "NAPI TX complete\n");
}
wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
return min(tx_done, budget);
}
static void wil_dev_setup(struct net_device *dev)
{
ether_setup(dev);
@ -228,7 +273,7 @@ static void wil_p2p_discovery_timer_fn(struct timer_list *t)
static void wil_vif_init(struct wil6210_vif *vif)
{
vif->bcast_vring = -1;
vif->bcast_ring = -1;
mutex_init(&vif->probe_client_mutex);
@ -418,11 +463,21 @@ int wil_if_add(struct wil6210_priv *wil)
}
init_dummy_netdev(&wil->napi_ndev);
netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
WIL6210_NAPI_BUDGET);
netif_tx_napi_add(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx,
WIL6210_NAPI_BUDGET);
if (wil->use_enhanced_dma_hw) {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx_edma,
WIL6210_NAPI_BUDGET);
netif_tx_napi_add(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx_edma,
WIL6210_NAPI_BUDGET);
} else {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx,
WIL6210_NAPI_BUDGET);
netif_tx_napi_add(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx,
WIL6210_NAPI_BUDGET);
}
wil_update_net_queues_bh(wil, vif, NULL, true);

View File

@ -85,7 +85,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
break;
case JTAG_DEV_ID_TALYN:
wil->hw_name = "Talyn";
wil->hw_name = "Talyn-MA";
wil->hw_version = HW_VER_TALYN;
memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
@ -94,6 +94,17 @@ int wil_set_capabilities(struct wil6210_priv *wil)
BIT_NO_FLASH_INDICATION)
set_bit(hw_capa_no_flash, wil->hw_capa);
break;
case JTAG_DEV_ID_TALYN_MB:
wil->hw_name = "Talyn-MB";
wil->hw_version = HW_VER_TALYN_MB;
memcpy(fw_mapping, talyn_mb_fw_mapping,
sizeof(talyn_mb_fw_mapping));
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
set_bit(hw_capa_no_flash, wil->hw_capa);
wil->use_enhanced_dma_hw = true;
wil->use_rx_hw_reordering = true;
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
jtag_id, chip_revision);
@ -102,6 +113,8 @@ int wil_set_capabilities(struct wil6210_priv *wil)
return -EINVAL;
}
wil_init_txrx_ops(wil);
iccm_section = wil_find_fw_mapping("fw_code");
if (!iccm_section) {
wil_err(wil, "fw_code section not found in fw_mapping\n");
@ -257,8 +270,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
int i;
int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
int i, start_idx;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
@ -293,24 +306,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto if_free;
}
/* rollback to err_plat */
/* device supports >32bit addresses */
for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
rc = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
dma_addr_size[i], rc);
continue;
}
dev_info(dev, "using dma mask %d", dma_addr_size[i]);
wil->dma_addr_size = dma_addr_size[i];
break;
}
if (wil->dma_addr_size == 0)
goto err_plat;
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
@ -350,6 +345,28 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
goto err_iounmap;
}
/* device supports >32bit addresses.
* for legacy DMA start from 48 bit.
*/
start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
rc = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
dma_addr_size[i], rc);
continue;
}
dev_info(dev, "using dma mask %d", dma_addr_size[i]);
wil->dma_addr_size = dma_addr_size[i];
break;
}
if (wil->dma_addr_size == 0)
goto err_iounmap;
wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */

View File

@ -211,7 +211,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
goto reject_suspend;
}
if (!wil_is_rx_idle(wil)) {
if (!wil->txrx_ops.is_rx_idle(wil)) {
wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
@ -235,9 +235,9 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
start = jiffies;
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
while (!wil_is_rx_idle(wil)) {
while (!wil->txrx_ops.is_rx_idle(wil)) {
if (time_after(jiffies, data_comp_to)) {
if (wil_is_rx_idle(wil))
if (wil->txrx_ops.is_rx_idle(wil))
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");

View File

@ -95,17 +95,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
struct wil6210_vif *vif;
struct net_device *ndev;
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
int tid = wil_rxdesc_tid(d);
int cid = wil_rxdesc_cid(d);
int mid = wil_rxdesc_mid(d);
u16 seq = wil_rxdesc_seq(d);
int mcast = wil_rxdesc_mcast(d);
struct wil_sta_info *sta = &wil->sta[cid];
int tid, cid, mid, mcast;
u16 seq;
struct wil_sta_info *sta;
struct wil_tid_ampdu_rx *r;
u16 hseq;
int index;
wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
&mcast);
sta = &wil->sta[cid];
wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
mid, cid, tid, seq, mcast);
@ -315,7 +315,10 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
* bits 6..15: buffer size
*/
u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
bool agg_amsdu = !!(param_set & BIT(0));
bool agg_amsdu = wil->use_enhanced_dma_hw &&
wil->use_rx_hw_reordering &&
test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
wil->amsdu_en && (param_set & BIT(0));
int ba_policy = param_set & BIT(1);
u16 status = WLAN_STATUS_SUCCESS;
u16 ssn = seq_ctrl >> 4;
@ -360,8 +363,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
}
}
rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
status, agg_amsdu, agg_wsize,
agg_timeout);
if (rc || (status != WLAN_STATUS_SUCCESS)) {
wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
status);
@ -384,7 +388,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
u8 agg_wsize = wil_agg_size(wil, wsize);
u16 agg_timeout = 0;
struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int rc = 0;
if (txdata->addba_in_progress) {

View File

@ -187,6 +187,40 @@ TRACE_EVENT(wil6210_rx,
__entry->seq, __entry->type, __entry->subtype)
);
TRACE_EVENT(wil6210_rx_status,
TP_PROTO(struct wil6210_priv *wil, u8 use_compressed, u16 buff_id,
void *msg),
TP_ARGS(wil, use_compressed, buff_id, msg),
TP_STRUCT__entry(__field(u8, use_compressed)
__field(u16, buff_id)
__field(unsigned int, len)
__field(u8, mid)
__field(u8, cid)
__field(u8, tid)
__field(u8, type)
__field(u8, subtype)
__field(u16, seq)
__field(u8, mcs)
),
TP_fast_assign(__entry->use_compressed = use_compressed;
__entry->buff_id = buff_id;
__entry->len = wil_rx_status_get_length(msg);
__entry->mid = wil_rx_status_get_mid(msg);
__entry->cid = wil_rx_status_get_cid(msg);
__entry->tid = wil_rx_status_get_tid(msg);
__entry->type = wil_rx_status_get_frame_type(wil,
msg);
__entry->subtype = wil_rx_status_get_fc1(wil, msg);
__entry->seq = wil_rx_status_get_seq(wil, msg);
__entry->mcs = wil_rx_status_get_mcs(msg);
),
TP_printk(
"compressed %d buff_id %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x type 0x%1x subtype 0x%1x",
__entry->use_compressed, __entry->buff_id, __entry->len,
__entry->mid, __entry->cid, __entry->tid, __entry->mcs,
__entry->seq, __entry->type, __entry->subtype)
);
TRACE_EVENT(wil6210_tx,
TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
TP_ARGS(vring, index, len, frags),
@ -226,6 +260,31 @@ TRACE_EVENT(wil6210_tx_done,
__entry->err)
);
TRACE_EVENT(wil6210_tx_status,
TP_PROTO(struct wil_ring_tx_status *msg, u16 index,
unsigned int len),
TP_ARGS(msg, index, len),
TP_STRUCT__entry(__field(u16, index)
__field(unsigned int, len)
__field(u8, num_descs)
__field(u8, ring_id)
__field(u8, status)
__field(u8, mcs)
),
TP_fast_assign(__entry->index = index;
__entry->len = len;
__entry->num_descs = msg->num_descriptors;
__entry->ring_id = msg->ring_id;
__entry->status = msg->status;
__entry->mcs = wil_tx_status_get_mcs(msg);
),
TP_printk(
"ring_id %d swtail 0x%x len %d num_descs %d status 0x%x mcs %d",
__entry->ring_id, __entry->index, __entry->len,
__entry->num_descs, __entry->status, __entry->mcs)
);
#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/
#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)

File diff suppressed because it is too large

View File

@ -18,6 +18,9 @@
#ifndef WIL6210_TXRX_H
#define WIL6210_TXRX_H
#include "wil6210.h"
#include "txrx_edma.h"
#define BUF_SW_OWNED (1)
#define BUF_HW_OWNED (0)
@ -29,19 +32,13 @@
/* Tx/Rx path */
/* Common representation of physical address in Vring */
struct vring_dma_addr {
__le32 addr_low;
__le16 addr_high;
} __packed;
static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
static inline dma_addr_t wil_desc_addr(struct wil_ring_dma_addr *addr)
{
return le32_to_cpu(addr->addr_low) |
((u64)le16_to_cpu(addr->addr_high) << 32);
}
static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
static inline void wil_desc_addr_set(struct wil_ring_dma_addr *addr,
dma_addr_t pa)
{
addr->addr_low = cpu_to_le32(lower_32_bits(pa));
@ -294,7 +291,7 @@ struct vring_tx_mac {
*/
struct vring_tx_dma {
u32 d0;
struct vring_dma_addr addr;
struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11; /* 0..6: mac_length; 7:ip_version */
u8 error; /* 0..2: err; 3..7: reserved; */
@ -428,7 +425,7 @@ struct vring_rx_mac {
struct vring_rx_dma {
u32 d0;
struct vring_dma_addr addr;
struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11;
u8 error;
@ -441,14 +438,24 @@ struct vring_tx_desc {
struct vring_tx_dma dma;
} __packed;
union wil_tx_desc {
struct vring_tx_desc legacy;
struct wil_tx_enhanced_desc enhanced;
} __packed;
struct vring_rx_desc {
struct vring_rx_mac mac;
struct vring_rx_dma dma;
} __packed;
union vring_desc {
struct vring_tx_desc tx;
struct vring_rx_desc rx;
union wil_rx_desc {
struct vring_rx_desc legacy;
struct wil_rx_enhanced_desc enhanced;
} __packed;
union wil_ring_desc {
union wil_tx_desc tx;
union wil_rx_desc rx;
} __packed;
static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
@ -528,6 +535,76 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
return (void *)skb->cb;
}
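/* Ring helpers below operate on the unified wil_ring used by both legacy
* and enhanced DMA; swhead and swtail are software indices, always taken
* modulo ring->size
*/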
static inline int wil_ring_is_empty(struct wil_ring *ring)
{
return ring->swhead == ring->swtail;
}
static inline u32 wil_ring_next_tail(struct wil_ring *ring)
{
return (ring->swtail + 1) % ring->size;
}
static inline void wil_ring_advance_head(struct wil_ring *ring, int n)
{
ring->swhead = (ring->swhead + n) % ring->size;
}
static inline int wil_ring_is_full(struct wil_ring *ring)
{
return wil_ring_next_tail(ring) == ring->swhead;
}
static inline bool wil_need_txstat(struct sk_buff *skb)
{
struct ethhdr *eth = (void *)skb->data;
return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
(skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}
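/* Free a Tx skb: report wifi ACK status to the socket when it asked for it,
* otherwise consume or drop the skb according to the ack result
*/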
static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
if (unlikely(wil_need_txstat(skb)))
skb_complete_wifi_ack(skb, acked);
else
acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}
/* Used space in Tx ring */
static inline int wil_ring_used_tx(struct wil_ring *ring)
{
u32 swhead = ring->swhead;
u32 swtail = ring->swtail;
return (ring->size + swhead - swtail) % ring->size;
}
/* Available space in Tx ring */
static inline int wil_ring_avail_tx(struct wil_ring *ring)
{
return ring->size - wil_ring_used_tx(ring) - 1;
}
static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil)
{
/* In Enhanced DMA ring 0 is reserved for RX */
return wil->use_enhanced_dma_hw ? 1 : 0;
}
/* similar to ieee80211_ version, but FC contain only 1-st byte */
static inline int wil_is_back_req(u8 fc)
{
return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
return val >= min && val < max;
}
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
@ -536,5 +613,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
int size, u16 ssn);
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
struct wil_tid_ampdu_rx *r);
void wil_tx_data_init(struct wil_ring_tx_data *txdata);
void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
#endif /* WIL6210_TXRX_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,562 @@
/*
* Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WIL6210_TXRX_EDMA_H
#define WIL6210_TXRX_EDMA_H
#include "wil6210.h"
/* limit status ring size in range [ring size..max ring size] */
#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN)
#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
/* RX sring order should be bigger than RX ring order */
#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11)
#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12)
#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536)
#define WIL_DEFAULT_RX_STATUS_RING_ID 0
#define WIL_RX_DESC_RING_ID 0
#define WIL_RX_STATUS_IRQ_IDX 0
#define WIL_TX_STATUS_IRQ_IDX 1
#define WIL_EDMA_AGG_WATERMARK (0xffff)
#define WIL_EDMA_AGG_WATERMARK_POS (16)
#define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50)
#define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */
/* Error field */
#define WIL_RX_EDMA_ERROR_MIC (1)
#define WIL_RX_EDMA_ERROR_KEY (2) /* Key missing */
#define WIL_RX_EDMA_ERROR_REPLAY (3)
#define WIL_RX_EDMA_ERROR_AMSDU (4)
#define WIL_RX_EDMA_ERROR_FCS (7)
#define WIL_RX_EDMA_ERROR_L3_ERR (BIT(0) | BIT(1))
#define WIL_RX_EDMA_ERROR_L4_ERR (BIT(0) | BIT(1))
#define WIL_RX_EDMA_DLPF_LU_MISS_BIT BIT(11)
#define WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK 0x7
#define WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK 0xf
#define WIL_RX_EDMA_DLPF_LU_MISS_CID_POS 2
#define WIL_RX_EDMA_DLPF_LU_HIT_CID_POS 4
#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS 5
#define WIL_RX_EDMA_MID_VALID_BIT BIT(22)
#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6
#define WIL_EDMA_DESC_TX_CFG_EOP_POS 0
#define WIL_EDMA_DESC_TX_CFG_EOP_LEN 1
#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS 3
#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_LEN 2
#define WIL_EDMA_DESC_TX_CFG_SEG_EN_POS 5
#define WIL_EDMA_DESC_TX_CFG_SEG_EN_LEN 1
#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS 6
#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_LEN 1
#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS 7
#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_LEN 1
#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS 15
#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_LEN 1
#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS 5
#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_LEN 1
/* Enhanced Rx descriptor - MAC part
* [dword 0] : Reserved
* [dword 1] : Reserved
* [dword 2] : Reserved
* [dword 3]
* bit 0..15 : Buffer ID
* bit 16..31 : Reserved
*/
struct wil_ring_rx_enhanced_mac {
u32 d[3];
__le16 buff_id;
u16 reserved;
} __packed;
/* Enhanced Rx descriptor - DMA part
* [dword 0] - Reserved
* [dword 1]
* bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
* [dword 2]
* bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
* bit 16..31 : Reserved
* [dword 3]
* bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
* bit 16..31 : length
*/
struct wil_ring_rx_enhanced_dma {
u32 d0;
struct wil_ring_dma_addr addr;
u16 w5;
__le16 addr_high_high;
__le16 length;
} __packed;
struct wil_rx_enhanced_desc {
struct wil_ring_rx_enhanced_mac mac;
struct wil_ring_rx_enhanced_dma dma;
} __packed;
/* Enhanced Tx descriptor - DMA part
* [dword 0]
* Same as legacy
* [dword 1]
* bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
* [dword 2]
* bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
* bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
* offload feature
* bit 24..30 : mac_length:7
* bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
* [dword 3]
* bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
* bit 16..31 : length
*/
struct wil_ring_tx_enhanced_dma {
u8 l4_hdr_len;
u8 cmd;
u16 w1;
struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11; /* 0..6: mac_length; 7:ip_version */
__le16 addr_high_high;
__le16 length;
} __packed;
/* Enhanced Tx descriptor - MAC part
* [dword 0]
* bit 0.. 9 : lifetime_expiry_value:10
* bit 10 : interrupt_en:1
* bit 11 : status_en:1
* bit 12..13 : txss_override:2
* bit 14 : timestamp_insertion:1
* bit 15 : duration_preserve:1
* bit 16..21 : reserved0:6
* bit 22..26 : mcs_index:5
* bit 27 : mcs_en:1
* bit 28..30 : reserved1:3
* bit 31 : sn_preserved:1
* [dword 1]
* bit 0.. 3 : pkt_mode:4
* bit 4 : pkt_mode_en:1
* bit 5..14 : reserved0:10
* bit 15 : ack_policy_en:1
* bit 16..19 : dst_index:4
* bit 20 : dst_index_en:1
* bit 21..22 : ack_policy:2
* bit 23 : lifetime_en:1
* bit 24..30 : max_retry:7
* bit 31 : max_retry_en:1
* [dword 2]
* bit 0.. 7 : num_of_descriptors:8
* bit 8..17 : reserved:10
* bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
* bit 20 : snap_hdr_insertion_en:1
* bit 21 : vlan_removal_en:1
* bit 22..23 : reserved0:2
* bit 24 : Dest ID extension:1
* bit 25..31 : reserved0:7
* [dword 3]
* bit 0..15 : tso_mss:16
* bit 16..31 : descriptor_scratchpad:16 - mailbox between driver and ucode
*/
struct wil_ring_tx_enhanced_mac {
u32 d[3];
__le16 tso_mss;
u16 scratchpad;
} __packed;
struct wil_tx_enhanced_desc {
struct wil_ring_tx_enhanced_mac mac;
struct wil_ring_tx_enhanced_dma dma;
} __packed;
#define TX_STATUS_DESC_READY_POS 7
/* Enhanced TX status message
* [dword 0]
* bit 0.. 7 : Number of Descriptor:8 - The number of descriptors that
* are used to form the packets. It is needed for WB when
* releasing the packet
* bit 8..15 : tx_ring_id:8 The transmission ring ID that is related to
* the message
* bit 16..23 : Status:8 - The TX status Code
* 0x0 - A successful transmission
* 0x1 - Retry expired
* 0x2 - Lifetime Expired
* 0x3 - Released
* 0x4-0xFF - Reserved
* bit 24..30 : Reserved:7
* bit 31 : Descriptor Ready bit:1 - It is initiated to
* zero by the driver when the ring is created. It is set by the HW
* to one for each completed status message. Each wrap around,
* the DR bit value is flipped.
* [dword 1]
* bit 0..31 : timestamp:32 - Set when MPDU is transmitted.
* [dword 2]
* bit 0.. 4 : MCS:5 - The transmitted MCS value
* bit 5 : Reserved:1
* bit 6.. 7 : CB mode:2 - 0-DMG 1-EDMG 2-Wide
* bit 8..12 : QID:5 - The QID that was used for the transmission
* bit 13..15 : Reserved:3
* bit 16..20 : Num of MSDUs:5 - Number of MSDUs in the aggregation
* bit 21..22 : Reserved:2
* bit 23 : Retry:1 - An indication that the transmission was retried
* bit 24..31 : TX-Sector:8 - the antenna sector that was used for
* transmission
* [dword 3]
* bit 0..11 : Sequence number:12 - The Sequence Number that was used
* for the MPDU transmission
* bit 12..31 : Reserved:20
*/
struct wil_ring_tx_status {
u8 num_descriptors;
u8 ring_id;
u8 status;
u8 desc_ready; /* Only the last bit should be set */
u32 timestamp;
u32 d2;
u16 seq_number; /* Only the first 12 bits */
u16 w7;
} __packed;
/* Enhanced Rx status message - compressed part
* [dword 0]
* bit 0.. 2 : L2 Rx Status:3 - The L2 packet reception Status
* 0-Success, 1-MIC Error, 2-Key Error, 3-Replay Error,
* 4-A-MSDU Error, 5-Reserved, 6-Reserved, 7-FCS Error
* bit 3.. 4 : L3 Rx Status:2 - Bit0 - L3I - L3 identified and checksum
* calculated, Bit1- L3Err - IPv4 Checksum Error
* bit 5.. 6 : L4 Rx Status:2 - Bit0 - L4I - L4 identified and checksum
* calculated, Bit1- L4Err - TCP/UDP Checksum Error
* bit 7 : Reserved:1
* bit 8..19 : Flow ID:12 - MSDU flow ID
* bit 20..21 : MID:2 - The MAC ID
* bit 22 : MID_V:1 - The MAC ID field is valid
* bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
* bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
* bit 25 : BC:1 - The received MPDU is broadcast
* bit 26 : MC:1 - The received MPDU is multicast
* bit 27 : Raw:1 - The MPDU received with no translation
* bit 28 : Sec:1 - The FC control (b14) - Frame Protected
* bit 29 : Error:1 - An error is set when (L2 status != 0) ||
* (L3 status == 3) || (L4 status == 3)
* bit 30 : EOP:1 - End of MSDU signaling. It is set to mark the end
* of the transfer, otherwise the status indicates buffer
* only completion.
* bit 31 : Descriptor Ready bit:1 - It is initiated to
* zero by the driver when the ring is created. It is set
* by the HW to one for each completed status message.
* Each wrap around, the DR bit value is flipped.
* [dword 1]
* bit 0.. 5 : MAC Len:6 - The number of bytes that are used for L2 header
* bit 6..11 : IPLEN:6 - The number of DW that are used for L3 header
* bit 12..15 : I4Len:4 - The number of DW that are used for L4 header
* bit 16..21 : MCS:6 - The received MCS field from the PLCP Header
* bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide
* bit 24..27 : Data Offset:4 - The data offset, a code that describes the
* payload shift from the beginning of the buffer:
* 0 - 0 Bytes, 3 - 2 Bytes
* bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field
* bit 29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field
* bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU
* bit 31 : Key ID:1 - The extracted Key ID from the encryption header
* [dword 2]
* bit 0..15 : Buffer ID:16 - The Buffer Identifier
* bit 16..31 : Length:16 - It indicates the valid bytes that are stored
* in the current descriptor buffer. For multiple buffer
* descriptors, SW needs to sum the total descriptor length
* in all buffers to produce the packet length
* [dword 3]
* bit 0..31 : timestamp:32 - The MPDU Timestamp.
*/
struct wil_rx_status_compressed {
u32 d0;
u32 d1;
__le16 buff_id;
__le16 length;
u32 timestamp;
} __packed;
/* Enhanced Rx status message - extension part
* [dword 0]
* bit 0.. 4 : QID:5 - The Queue Identifier that the packet is received
* from
* bit 5.. 7 : Reserved:3
* bit 8..11 : TID:4 - The QoS (b3-0) TID Field
* bit 12..15 : Source index:4 - The Source index that was found
* during parsing the TA. This field is used to define the
* source of the packet
* bit 16..18 : Destination index:3 - The Destination index that
* was found during parsing the RA.
* bit 19..20 : DS Type:2 - The FC Control (b9-8) - From / To DS
* bit 21..22 : MIC ICR:2 - this signal tells the DMA to assert an
* interrupt after it writes the packet
* bit 23 : ESOP:1 - The QoS (b4) ESOP field
* bit 24 : RDG:1
* bit 25..31 : Reserved:7
* [dword 1]
* bit 0.. 1 : Frame Type:2 - The FC Control (b3-2) - MPDU Type
* (management, data, control and extension)
* bit 2.. 5 : Sub type:4 - The FC Control (b7-4) - Frame Subtype
* bit 6..11 : Ext sub type:6 - The FC Control (b11-8) - Frame Extended
* Subtype
* bit 12..13 : ACK Policy:2 - The QoS (b6-5) ACK Policy fields
* bit 14 : DECRYPT_BYP:1 - The MPDU is bypass by the decryption unit
* bit 15..23 : Reserved:9
* bit 24..31 : RSSI/SNR:8 - The RSSI / SNR measurement for the received
* MPDU
* [dword 2]
* bit 0..11 : SN:12 - The received Sequence number field
* bit 12..15 : Reserved:4
* bit 16..31 : PN bits [15:0]:16
* [dword 3]
* bit 0..31 : PN bits [47:16]:32
*/
struct wil_rx_status_extension {
u32 d0;
u32 d1;
__le16 seq_num; /* only lower 12 bits */
u16 pn_15_0;
u32 pn_47_16;
} __packed;
struct wil_rx_status_extended {
struct wil_rx_status_compressed comp;
struct wil_rx_status_extension ext;
} __packed;
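/* For enhanced DMA the Rx status message is stashed in skb->cb,
* mirroring wil_skb_rxdesc() in the legacy flow
*/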
static inline void *wil_skb_rxstatus(struct sk_buff *skb)
{
return (void *)skb->cb;
}
static inline __le16 wil_rx_status_get_length(void *msg)
{
return ((struct wil_rx_status_compressed *)msg)->length;
}
static inline u8 wil_rx_status_get_mcs(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
16, 21);
}
static inline u16 wil_rx_status_get_flow_id(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
8, 19);
}
static inline u8 wil_rx_status_get_mcast(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
26, 26);
}
/**
* In case of DLPF miss the parsing of flow Id should be as follows:
* dest_id:2
* src_id :3 - cid
* tid:3
* Otherwise:
* tid:4
* cid:4
*/
static inline u8 wil_rx_status_get_cid(void *msg)
{
u16 val = wil_rx_status_get_flow_id(msg);
if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
/* CID is in bits 2..4 */
return (val >> WIL_RX_EDMA_DLPF_LU_MISS_CID_POS) &
WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
else
/* CID is in bits 4..7 */
return (val >> WIL_RX_EDMA_DLPF_LU_HIT_CID_POS) &
WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK;
}
static inline u8 wil_rx_status_get_tid(void *msg)
{
u16 val = wil_rx_status_get_flow_id(msg);
if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
/* TID is in bits 5..7 */
return (val >> WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) &
WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
else
/* TID is in bits 0..3 */
return val & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
}
static inline int wil_rx_status_get_desc_rdy_bit(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
31, 31);
}
static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
30, 30);
}
static inline __le16 wil_rx_status_get_buff_id(void *msg)
{
return ((struct wil_rx_status_compressed *)msg)->buff_id;
}
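/* Data offset is a 4-bit code: 0 means no shift, 3 means a 2 byte shift;
* any other value is unexpected and reported as 0xFF
*/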
static inline u8 wil_rx_status_get_data_offset(void *msg)
{
u8 val = WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
24, 27);
switch (val) {
case 0: return 0;
case 3: return 2;
default: return 0xFF;
}
}
static inline int wil_rx_status_get_frame_type(struct wil6210_priv *wil,
void *msg)
{
if (wil->use_compressed_rx_status)
return IEEE80211_FTYPE_DATA;
return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
0, 1) << 2;
}
static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg)
{
if (wil->use_compressed_rx_status)
return 0;
return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
0, 5) << 2;
}
static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg)
{
if (wil->use_compressed_rx_status)
return 0;
return ((struct wil_rx_status_extended *)msg)->ext.seq_num;
}
static inline int wil_rx_status_get_mid(void *msg)
{
if (!(((struct wil_rx_status_compressed *)msg)->d0 &
WIL_RX_EDMA_MID_VALID_BIT))
return 0; /* use the default MID */
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
20, 21);
}
static inline int wil_rx_status_get_error(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
29, 29);
}
static inline int wil_rx_status_get_l2_rx_status(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
0, 2);
}
static inline int wil_rx_status_get_l3_rx_status(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
3, 4);
}
static inline int wil_rx_status_get_l4_rx_status(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
5, 6);
}
static inline int wil_rx_status_get_security(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
28, 28);
}
static inline u8 wil_rx_status_get_key_id(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
31, 31);
}
static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg)
{
return WIL_GET_BITS(msg->d2, 0, 4);
}
static inline u32 wil_ring_next_head(struct wil_ring *ring)
{
return (ring->swhead + 1) % ring->size;
}
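/* Enhanced DMA descriptors carry a wider payload address: bits 0..31 in
* addr_low, bits 32..47 in addr_high and bits 48..63 in addr_high_high
*/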
static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr,
__le16 *addr_high_high,
dma_addr_t pa)
{
addr->addr_low = cpu_to_le32(lower_32_bits(pa));
addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
*addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16));
}
static inline
dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma)
{
return le32_to_cpu(dma->addr.addr_low) |
((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
((u64)le16_to_cpu(dma->addr_high_high) << 48);
}
static inline
dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma)
{
return le32_to_cpu(dma->addr.addr_low) |
((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
((u64)le16_to_cpu(dma->addr_high_high) << 48);
}
void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil);
int wil_tx_sring_handler(struct wil6210_priv *wil,
struct wil_status_ring *sring);
void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota);
void wil_init_txrx_ops_edma(struct wil6210_priv *wil);
#endif /* WIL6210_TXRX_EDMA_H */

View File

@ -24,6 +24,7 @@
#include <net/cfg80211.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/irqreturn.h>
#include "wmi.h"
#include "wil_platform.h"
#include "fw.h"
@ -37,6 +38,10 @@ extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
struct wil6210_priv;
struct wil6210_vif;
union wil_tx_desc;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME_DEFAULT "wil6210.fw"
@ -80,6 +85,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
#define WIL6210_MAX_STATUS_RINGS (8)
/* Hardware offload block adds the following:
* 26 bytes - 3-address QoS data header
* 8 bytes - IV + EIV (for GCMP)
@ -203,7 +210,9 @@ struct RGF_ICR {
#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
#define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0)
#define BIT_NO_FLASH_INDICATION BIT(8)
#define BIT_OTP_SIGNATURE_ERR_TALYN_MB BIT(0)
#define BIT_OTP_HW_SECTION_DONE_TALYN_MB BIT(2)
#define BIT_NO_FLASH_INDICATION BIT(8)
#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec)
#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0)
#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4)
@ -305,20 +314,49 @@ struct RGF_ICR {
#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
#define RGF_OTP_QC_SECURED (0x8a0038)
#define BIT_BOOT_FROM_ROM BIT(31)
/* eDMA */
#define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8)
#define RGF_INT_CTRL_INT_GEN_CFG_0 (0x8bc000)
#define RGF_INT_CTRL_INT_GEN_CFG_1 (0x8bc004)
#define RGF_INT_GEN_TIME_UNIT_LIMIT (0x8bc0c8)
#define RGF_INT_GEN_CTRL (0x8bc0ec)
#define BIT_CONTROL_0 BIT(0)
/* eDMA status interrupts */
#define RGF_INT_GEN_RX_ICR (0x8bc0f4)
#define BIT_RX_STATUS_IRQ BIT(WIL_RX_STATUS_IRQ_IDX)
#define RGF_INT_GEN_TX_ICR (0x8bc110)
#define BIT_TX_STATUS_IRQ BIT(WIL_TX_STATUS_IRQ_IDX)
#define RGF_INT_CTRL_RX_INT_MASK (0x8bc12c)
#define RGF_INT_CTRL_TX_INT_MASK (0x8bc130)
#define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134)
#define USER_EXT_USER_PMU_3 (0x88d00c)
#define BIT_PMU_DEVICE_RDY BIT(0)
#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
#define JTAG_DEV_ID_SPARROW (0x2632072f)
#define JTAG_DEV_ID_TALYN (0x7e0e1)
#define JTAG_DEV_ID_TALYN_MB (0x1007e0e1)
#define RGF_USER_REVISION_ID (0x88afe4)
#define RGF_USER_REVISION_ID_MASK (3)
#define REVISION_ID_SPARROW_B0 (0x0)
#define REVISION_ID_SPARROW_D0 (0x3)
#define RGF_OTP_MAC_TALYN_MB (0x8a0304)
#define RGF_OTP_MAC (0x8a0620)
/* Talyn-MB */
#define RGF_USER_USER_CPU_0_TALYN_MB (0x8c0138)
#define RGF_USER_MAC_CPU_0_TALYN_MB (0x8c0154)
/* crash codes for FW/Ucode stored here */
/* ASSERT RGFs */
@ -332,6 +370,7 @@ enum {
HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */
HW_VER_TALYN_MB /* JTAG_DEV_ID_TALYN_MB */
};
/* popular locations */
@ -349,7 +388,14 @@ enum {
/* Hardware definitions end */
#define SPARROW_FW_MAPPING_TABLE_SIZE 10
#define TALYN_FW_MAPPING_TABLE_SIZE 13
#define MAX_FW_MAPPING_TABLE_SIZE 13
#define TALYN_MB_FW_MAPPING_TABLE_SIZE 19
#define MAX_FW_MAPPING_TABLE_SIZE 19
/* Common representation of physical address in wil ring */
struct wil_ring_dma_addr {
__le32 addr_low;
__le16 addr_high;
} __packed;
struct fw_map {
u32 from; /* linker address - from, inclusive */
@ -357,12 +403,14 @@ struct fw_map {
u32 host; /* PCI/Host address - BAR0 + 0x880000 */
const char *name; /* for debugfs */
bool fw; /* true if FW mapping, false if UCODE mapping */
bool crash_dump; /* true if should be dumped during crash dump */
};
/* array size should be in sync with actual definition in the wmi.c */
extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
extern const struct fw_map sparrow_d0_mac_rgf_ext;
extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
extern const struct fw_map talyn_mb_fw_mapping[TALYN_MB_FW_MAPPING_TABLE_SIZE];
extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
/**
@ -438,7 +486,7 @@ enum { /* for wil_ctx.mapped_as */
};
/**
* struct wil_ctx - software context for Vring descriptor
* struct wil_ctx - software context for ring descriptor
*/
struct wil_ctx {
struct sk_buff *skb;
@ -446,22 +494,96 @@ struct wil_ctx {
u8 mapped_as;
};
union vring_desc;
struct vring {
struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */
u32 *va;
dma_addr_t pa;
volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
u16 size; /* number of vring_desc elements */
};
/**
* A general ring structure, used for RX and TX.
* In legacy DMA it represents the vring,
* In enhanced DMA it represents the descriptor ring (vrings are handled by FW)
*/
struct wil_ring {
dma_addr_t pa;
volatile union wil_ring_desc *va;
u16 size; /* number of wil_ring_desc elements */
u32 swtail;
u32 swhead;
u32 hwtail; /* write here to inform hw */
struct wil_ctx *ctx; /* ctx[size] - software context */
struct wil_desc_ring_rx_swtail edma_rx_swtail;
bool is_rx;
};
/**
* Additional data for Tx Vring
* Additional data for Rx ring.
* Used for enhanced DMA RX chaining.
*/
struct vring_tx_data {
struct wil_ring_rx_data {
/* the skb being assembled */
struct sk_buff *skb;
/* true if we are skipping a bad fragmented packet */
bool skipping;
u16 buff_size;
};
/**
* Status ring structure, used for enhanced DMA completions for RX and TX.
*/
struct wil_status_ring {
dma_addr_t pa;
void *va; /* pointer to ring_[tr]x_status elements */
u16 size; /* number of status elements */
size_t elem_size; /* status element size in bytes */
u32 swhead;
u32 hwtail; /* write here to inform hw */
bool is_rx;
u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
struct wil_ring_rx_data rx_data;
};
/**
* struct wil_txrx_ops - different TX/RX ops for legacy and enhanced
* DMA flows
*/
struct wil_txrx_ops {
void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
/* TX ops */
int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id,
int size, int cid, int tid);
void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring);
int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size);
int (*tx_init)(struct wil6210_priv *wil);
void (*tx_fini)(struct wil6210_priv *wil);
int (*tx_desc_map)(union wil_tx_desc *desc, dma_addr_t pa,
u32 len, int ring_index);
void (*tx_desc_unmap)(struct device *dev,
union wil_tx_desc *desc,
struct wil_ctx *ctx);
int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct wil_ring *ring, struct sk_buff *skb);
irqreturn_t (*irq_tx)(int irq, void *cookie);
/* RX ops */
int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
void (*rx_fini)(struct wil6210_priv *wil);
int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
u8 tid, u8 token, u16 status, bool amsdu,
u16 agg_wsize, u16 timeout);
void (*get_reorder_params)(struct wil6210_priv *wil,
struct sk_buff *skb, int *tid, int *cid,
int *mid, u16 *seq, int *mcast);
void (*get_netif_rx_params)(struct sk_buff *skb,
int *cid, int *security);
int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb);
bool (*is_rx_idle)(struct wil6210_priv *wil);
irqreturn_t (*irq_rx)(int irq, void *cookie);
};
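/* txrx_ops is populated by wil_init_txrx_ops_legacy_dma() or
* wil_init_txrx_ops_edma(), selected in wil_init_txrx_ops() according to
* use_enhanced_dma_hw
*/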
/**
* Additional data for Tx ring
*/
struct wil_ring_tx_data {
bool dot1x_open;
int enabled;
cycles_t idle, last_idle, begin;
@ -564,6 +686,9 @@ struct wil_net_stats {
unsigned long rx_short_frame;
unsigned long rx_large_frame;
unsigned long rx_replay;
unsigned long rx_mic_error; /* eDMA specific */
unsigned long rx_key_error; /* eDMA specific */
unsigned long rx_amsdu_error; /* eDMA specific */
u16 last_mcs_rx;
u64 rx_per_mcs[WIL_MCS_MAX + 1];
};
@ -681,7 +806,7 @@ struct wil6210_vif {
u8 hidden_ssid; /* relevant in AP mode */
u32 ap_isolate; /* no intra-BSS communication */
bool pbss;
int bcast_vring;
int bcast_ring;
struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
int locally_generated_disc; /* relevant in STA mode */
struct timer_list connect_timer;
@ -697,6 +822,31 @@ struct wil6210_vif {
int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
};
/**
* RX buffer allocated for enhanced DMA RX descriptors
*/
struct wil_rx_buff {
struct sk_buff *skb;
struct list_head list;
int id;
};
/**
* During Rx completion processing, the driver extracts a buffer ID which
* is used as an index into the rx_buff_mgmt.buff_arr array; the SKB is then
* given to the network stack and the buffer is moved from the 'active'
* list to the 'free' list.
* During Rx refill, SKBs are attached to free buffers and moved to the
* 'active' list.
*/
struct wil_rx_buff_mgmt {
struct wil_rx_buff *buff_arr;
size_t size; /* number of items in buff_arr */
struct list_head active;
struct list_head free;
unsigned long free_list_empty_cnt; /* statistics */
};
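A minimal sketch of the buffer life cycle described in the comment above, using the standard list helpers; locking, DMA mapping and error paths are omitted and the function names are illustrative.

/* Refill: take a free wil_rx_buff, attach an skb, move it to 'active'. */
static struct wil_rx_buff *wil_rx_buff_alloc_sketch(struct wil_rx_buff_mgmt *m,
						    struct sk_buff *skb)
{
	struct wil_rx_buff *buff;

	if (list_empty(&m->free)) {
		m->free_list_empty_cnt++;
		return NULL;
	}
	buff = list_first_entry(&m->free, struct wil_rx_buff, list);
	buff->skb = skb;
	list_move(&buff->list, &m->active);
	return buff;
}

/* Completion: buffer ID indexes buff_arr, skb goes to the stack,
 * buffer returns to 'free'.
 */
static struct sk_buff *wil_rx_buff_complete_sketch(struct wil_rx_buff_mgmt *m,
						   int id)
{
	struct wil_rx_buff *buff = &m->buff_arr[id];
	struct sk_buff *skb = buff->skb;

	buff->skb = NULL;
	list_move(&buff->list, &m->free);
	return skb;
}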
struct wil6210_priv {
struct pci_dev *pdev;
u32 bar_size;
@ -761,14 +911,20 @@ struct wil6210_priv {
struct net_device napi_ndev; /* dummy net_device serving all VIFs */
/* DMA related */
struct vring vring_rx;
struct wil_ring ring_rx;
unsigned int rx_buf_len;
struct vring vring_tx[WIL6210_MAX_TX_RINGS];
struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS];
struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS];
struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS];
u8 num_rx_status_rings;
int tx_sring_idx;
u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_sta_info sta[WIL6210_MAX_CID];
u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */
u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once */
u32 dma_addr_size; /* indicates dma addr size */
struct wil_rx_buff_mgmt rx_buff_mgmt;
bool use_enhanced_dma_hw;
struct wil_txrx_ops txrx_ops;
struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
/* statistics */
@ -811,6 +967,16 @@ struct wil6210_priv {
u32 rgf_fw_assert_code_addr;
u32 rgf_ucode_assert_code_addr;
u32 iccm_base;
/* relevant only for eDMA */
bool use_compressed_rx_status;
u32 rx_status_ring_order;
u32 tx_status_ring_order;
u32 rx_buff_id_count;
bool amsdu_en;
bool use_rx_hw_reordering;
bool secured_boot;
u8 boot_config;
};
#define wil_to_wiphy(i) (i->wiphy)
@ -990,7 +1156,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
int key_usage);
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
@ -1083,30 +1249,28 @@ void wil_probe_client_flush(struct wil6210_vif *vif);
void wil_probe_client_worker(struct work_struct *work);
void wil_disconnect_worker(struct work_struct *work);
int wil_rx_init(struct wil6210_priv *wil, u16 size);
void wil_rx_fini(struct wil6210_priv *wil);
void wil_init_txrx_ops(struct wil6210_priv *wil);
/* TX API */
int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
int cid, int tid);
void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
int wil_tx_init(struct wil6210_vif *vif, int cid);
int wil_ring_init_tx(struct wil6210_vif *vif, int cid);
int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size);
int wil_bcast_init(struct wil6210_vif *vif);
void wil_bcast_fini(struct wil6210_vif *vif);
void wil_bcast_fini_all(struct wil6210_priv *wil);
void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct vring *vring, bool should_stop);
struct wil_ring *ring, bool should_stop);
void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct vring *vring, bool check_stop);
struct wil_ring *ring, bool check_stop);
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int wil_tx_complete(struct wil6210_vif *vif, int ringid);
void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
/* RX API */
void wil_rx_handle(struct wil6210_priv *wil, int *quota);
void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
@ -1127,7 +1291,6 @@ bool wil_is_wmi_idle(struct wil6210_priv *wil);
int wmi_resume(struct wil6210_priv *wil);
int wmi_suspend(struct wil6210_priv *wil);
bool wil_is_tx_idle(struct wil6210_priv *wil);
bool wil_is_rx_idle(struct wil6210_priv *wil);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
@ -1142,4 +1305,19 @@ int wmi_start_sched_scan(struct wil6210_priv *wil,
int wmi_stop_sched_scan(struct wil6210_priv *wil);
int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
int reverse_memcmp(const void *cs, const void *ct, size_t count);
/* WMI for enhanced DMA */
int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id);
int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil,
u16 max_rx_pl_per_desc);
int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id);
int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id);
int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
int tid);
int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id);
int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
u8 tid, u8 token, u16 status, bool amsdu,
u16 agg_wsize, u16 timeout);
#endif /* __WIL6210_H__ */
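For context, one plausible order in which the enhanced-DMA WMI helpers declared above could be invoked during RX bring-up; the ring IDs, the payload-size argument and the error handling are illustrative, not the driver's actual init sequence.

/* Illustrative sketch of an eDMA RX bring-up sequence. */
static int wil_edma_rx_bringup_sketch(struct wil6210_priv *wil)
{
	int rc;

	rc = wil_wmi_tx_sring_cfg(wil, wil->tx_sring_idx);	/* TX status ring */
	if (rc)
		return rc;
	rc = wil_wmi_rx_sring_add(wil, 0);			/* RX status ring 0 */
	if (rc)
		return rc;
	rc = wil_wmi_cfg_def_rx_offload(wil, WIL_MAX_ETH_MTU);	/* default RX offloads */
	if (rc)
		return rc;
	return wil_wmi_rx_desc_ring_add(wil, 0);		/* RX descriptor ring */
}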

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -36,7 +37,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
if (!map->fw)
if (!map->crash_dump)
continue;
if (map->host < host_min)
@ -85,7 +86,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
if (!map->fw)
if (!map->crash_dump)
continue;
data = (void * __force)wil->csr + HOSTADDR(map->host);

View File

@ -89,28 +89,28 @@ MODULE_PARM_DESC(led_id,
*/
const struct fw_map sparrow_fw_mapping[] = {
/* FW code RAM 256k */
{0x000000, 0x040000, 0x8c0000, "fw_code", true},
{0x000000, 0x040000, 0x8c0000, "fw_code", true, true},
/* FW data RAM 32k */
{0x800000, 0x808000, 0x900000, "fw_data", true},
{0x800000, 0x808000, 0x900000, "fw_data", true, true},
/* periph data 128k */
{0x840000, 0x860000, 0x908000, "fw_peri", true},
{0x840000, 0x860000, 0x908000, "fw_peri", true, true},
/* various RGF 40k */
{0x880000, 0x88a000, 0x880000, "rgf", true},
{0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 512b */
{0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true},
{0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true},
/* upper area 548k */
{0x8c0000, 0x949000, 0x8c0000, "upper", true},
{0x8c0000, 0x949000, 0x8c0000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 128k */
{0x000000, 0x020000, 0x920000, "uc_code", false},
{0x000000, 0x020000, 0x920000, "uc_code", false, false},
/* ucode data RAM 16k */
{0x800000, 0x804000, 0x940000, "uc_data", false},
{0x800000, 0x804000, 0x940000, "uc_data", false, false},
};
/**
@ -118,7 +118,7 @@ const struct fw_map sparrow_fw_mapping[] = {
* it is a bit larger to support extra features
*/
const struct fw_map sparrow_d0_mac_rgf_ext = {
0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true
0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
};
/**
@ -134,34 +134,89 @@ const struct fw_map sparrow_d0_mac_rgf_ext = {
*/
const struct fw_map talyn_fw_mapping[] = {
/* FW code RAM 1M */
{0x000000, 0x100000, 0x900000, "fw_code", true},
{0x000000, 0x100000, 0x900000, "fw_code", true, true},
/* FW data RAM 128k */
{0x800000, 0x820000, 0xa00000, "fw_data", true},
{0x800000, 0x820000, 0xa00000, "fw_data", true, true},
/* periph. data RAM 96k */
{0x840000, 0x858000, 0xa20000, "fw_peri", true},
{0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
/* various RGF 40k */
{0x880000, 0x88a000, 0x880000, "rgf", true},
{0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true},
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true},
{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 1344b */
{0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true},
{0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true},
/* ext USER RGF 4k */
{0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true},
{0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
/* OTP 4k */
{0x8a0000, 0x8a1000, 0x8a0000, "otp", true},
{0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
/* DMA EXT RGF 64k */
{0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true},
{0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
/* upper area 1536k */
{0x900000, 0xa80000, 0x900000, "upper", true},
{0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 256k */
{0x000000, 0x040000, 0xa38000, "uc_code", false},
{0x000000, 0x040000, 0xa38000, "uc_code", false, false},
/* ucode data RAM 32k */
{0x800000, 0x808000, 0xa78000, "uc_data", false},
{0x800000, 0x808000, 0xa78000, "uc_data", false, false},
};
/**
* @talyn_mb_fw_mapping provides the memory remapping table for Talyn-MB
*
* array size should be in sync with the declaration in wil6210.h
*
* Talyn MB memory mapping:
* Linker address PCI/Host address
* 0x880000 .. 0xc80000 4Mb BAR0
* 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
* 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
*/
const struct fw_map talyn_mb_fw_mapping[] = {
/* FW code RAM 768k */
{0x000000, 0x0c0000, 0x900000, "fw_code", true, true},
/* FW data RAM 128k */
{0x800000, 0x820000, 0xa00000, "fw_data", true, true},
/* periph. data RAM 96k */
{0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
/* various RGF 40k */
{0x880000, 0x88a000, 0x880000, "rgf", true, true},
/* AGC table 4k */
{0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
/* Pcie_ext_rgf 4k */
{0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
/* mac_ext_rgf 2256b */
{0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true},
/* ext USER RGF 4k */
{0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
/* SEC PKA 16k */
{0x890000, 0x894000, 0x890000, "sec_pka", true, true},
/* SEC KDF RGF 3096b */
{0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true},
/* SEC MAIN 2124b */
{0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true},
/* OTP 4k */
{0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
/* DMA EXT RGF 64k */
{0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
/* DUM USER RGF 528b */
{0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
/* DMA OFU 296b */
{0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
/* ucode debug 4k */
{0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
/* upper area 1536k */
{0x900000, 0xa80000, 0x900000, "upper", true, true},
/* UCODE areas - accessible by debugfs blobs but not by
* wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
*/
/* ucode code RAM 256k */
{0x000000, 0x040000, 0xa38000, "uc_code", false, false},
/* ucode data RAM 32k */
{0x800000, 0x808000, 0xa78000, "uc_data", false, false},
};
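The tables above drive firmware-to-host address translation; a sketch of the lookup follows, mirroring what wmi_addr_remap() does. The fw_map field names are assumed from the initializer order ({from, to, host, name, fw, crash_dump}) and the helper name is illustrative.

/* Sketch: translate a firmware linker address to the host (BAR0) address. */
static u32 fw_linker_to_host_sketch(u32 linker_addr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
		const struct fw_map *map = &fw_mapping[i];

		/* only FW areas participate in the remap, UCODE areas do not */
		if (!map->fw)
			continue;
		if (linker_addr >= map->from && linker_addr < map->to)
			return map->host + (linker_addr - map->from);
	}
	return 0;	/* not mapped */
}

With talyn_mb_fw_mapping loaded, for example, linker address 0x800010 inside fw_data would translate to host address 0xa00010.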
struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
@ -365,14 +420,16 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_DEL_STA_CMD";
case WMI_DISCONNECT_STA_CMDID:
return "WMI_DISCONNECT_STA_CMD";
case WMI_VRING_BA_EN_CMDID:
return "WMI_VRING_BA_EN_CMD";
case WMI_VRING_BA_DIS_CMDID:
return "WMI_VRING_BA_DIS_CMD";
case WMI_RING_BA_EN_CMDID:
return "WMI_RING_BA_EN_CMD";
case WMI_RING_BA_DIS_CMDID:
return "WMI_RING_BA_DIS_CMD";
case WMI_RCP_DELBA_CMDID:
return "WMI_RCP_DELBA_CMD";
case WMI_RCP_ADDBA_RESP_CMDID:
return "WMI_RCP_ADDBA_RESP_CMD";
case WMI_RCP_ADDBA_RESP_EDMA_CMDID:
return "WMI_RCP_ADDBA_RESP_EDMA_CMD";
case WMI_PS_DEV_PROFILE_CFG_CMDID:
return "WMI_PS_DEV_PROFILE_CFG_CMD";
case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
@ -395,6 +452,18 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_START_SCHED_SCAN_CMD";
case WMI_STOP_SCHED_SCAN_CMDID:
return "WMI_STOP_SCHED_SCAN_CMD";
case WMI_TX_STATUS_RING_ADD_CMDID:
return "WMI_TX_STATUS_RING_ADD_CMD";
case WMI_RX_STATUS_RING_ADD_CMDID:
return "WMI_RX_STATUS_RING_ADD_CMD";
case WMI_TX_DESC_RING_ADD_CMDID:
return "WMI_TX_DESC_RING_ADD_CMD";
case WMI_RX_DESC_RING_ADD_CMDID:
return "WMI_RX_DESC_RING_ADD_CMD";
case WMI_BCAST_DESC_RING_ADD_CMDID:
return "WMI_BCAST_DESC_RING_ADD_CMD";
case WMI_CFG_DEF_RX_OFFLOAD_CMDID:
return "WMI_CFG_DEF_RX_OFFLOAD_CMD";
default:
return "Untracked CMD";
}
@ -449,8 +518,8 @@ static const char *eventid2name(u16 eventid)
return "WMI_RCP_ADDBA_REQ_EVENT";
case WMI_DELBA_EVENTID:
return "WMI_DELBA_EVENT";
case WMI_VRING_EN_EVENTID:
return "WMI_VRING_EN_EVENT";
case WMI_RING_EN_EVENTID:
return "WMI_RING_EN_EVENT";
case WMI_DATA_PORT_OPEN_EVENTID:
return "WMI_DATA_PORT_OPEN_EVENT";
case WMI_AOA_MEAS_EVENTID:
@ -519,6 +588,16 @@ static const char *eventid2name(u16 eventid)
return "WMI_STOP_SCHED_SCAN_EVENT";
case WMI_SCHED_SCAN_RESULT_EVENTID:
return "WMI_SCHED_SCAN_RESULT_EVENT";
case WMI_TX_STATUS_RING_CFG_DONE_EVENTID:
return "WMI_TX_STATUS_RING_CFG_DONE_EVENT";
case WMI_RX_STATUS_RING_CFG_DONE_EVENTID:
return "WMI_RX_STATUS_RING_CFG_DONE_EVENT";
case WMI_TX_DESC_RING_CFG_DONE_EVENTID:
return "WMI_TX_DESC_RING_CFG_DONE_EVENT";
case WMI_RX_DESC_RING_CFG_DONE_EVENTID:
return "WMI_RX_DESC_RING_CFG_DONE_EVENT";
case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID:
return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT";
default:
return "Untracked EVENT";
}
@ -906,7 +985,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
wil->sta[evt->cid].mid = vif->mid;
wil->sta[evt->cid].status = wil_sta_conn_pending;
rc = wil_tx_init(vif, evt->cid);
rc = wil_ring_init_tx(vif, evt->cid);
if (rc) {
wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
evt->cid, rc);
@ -1063,16 +1142,16 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
}
}
static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_vring_en_event *evt = d;
u8 vri = evt->vring_index;
struct wmi_ring_en_event *evt = d;
u8 vri = evt->ring_index;
struct wireless_dev *wdev = vif_to_wdev(vif);
wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
if (vri >= ARRAY_SIZE(wil->vring_tx)) {
if (vri >= ARRAY_SIZE(wil->ring_tx)) {
wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
@ -1081,8 +1160,8 @@ static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
/* in AP mode with disable_ap_sme, this is done by
* wil_cfg80211_change_station()
*/
wil->vring_tx_data[vri].dot1x_open = true;
if (vri == vif->bcast_vring) /* no BA for bcast */
wil->ring_tx_data[vri].dot1x_open = true;
if (vri == vif->bcast_ring) /* no BA for bcast */
return;
if (agg_wsize >= 0)
wil_addba_tx_request(wil, vri, agg_wsize);
@ -1093,7 +1172,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wmi_ba_status_event *evt = d;
struct vring_tx_data *txdata;
struct wil_ring_tx_data *txdata;
wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
evt->ringid,
@ -1112,7 +1191,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
evt->amsdu = 0;
}
txdata = &wil->vring_tx_data[evt->ringid];
txdata = &wil->ring_tx_data[evt->ringid];
txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
txdata->agg_wsize = evt->agg_wsize;
@ -1150,11 +1229,11 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (!evt->from_initiator) {
int i;
/* find Tx vring it belongs to */
for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
if ((wil->vring2cid_tid[i][0] == cid) &&
(wil->vring2cid_tid[i][1] == tid)) {
struct vring_tx_data *txdata =
&wil->vring_tx_data[i];
for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
if (wil->ring2cid_tid[i][0] == cid &&
wil->ring2cid_tid[i][1] == tid) {
struct wil_ring_tx_data *txdata =
&wil->ring_tx_data[i];
wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
txdata->agg_timeout = 0;
@ -1164,7 +1243,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
break; /* max. 1 matching ring */
}
}
if (i >= ARRAY_SIZE(wil->vring2cid_tid))
if (i >= ARRAY_SIZE(wil->ring2cid_tid))
wil_err(wil, "DELBA: unable to find Tx vring\n");
return;
}
@ -1277,7 +1356,7 @@ static const struct {
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
{WMI_RING_EN_EVENTID, wmi_evt_ring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
{WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
};
@ -1909,7 +1988,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
return rc;
}
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct net_device *ndev = wil->main_ndev;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
@ -2063,29 +2142,32 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
int wmi_addba(struct wil6210_priv *wil, u8 mid,
u8 ringid, u8 size, u16 timeout)
{
struct wmi_vring_ba_en_cmd cmd = {
.ringid = ringid,
u8 amsdu = wil->use_enhanced_dma_hw && wil->use_rx_hw_reordering &&
test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
wil->amsdu_en;
struct wmi_ring_ba_en_cmd cmd = {
.ring_id = ringid,
.agg_max_wsize = size,
.ba_timeout = cpu_to_le16(timeout),
.amsdu = 0,
.amsdu = amsdu,
};
wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size,
timeout);
wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d amsdu %d)\n",
ringid, size, timeout, amsdu);
return wmi_send(wil, WMI_VRING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
{
struct wmi_vring_ba_dis_cmd cmd = {
.ringid = ringid,
struct wmi_ring_ba_dis_cmd cmd = {
.ring_id = ringid,
.reason = cpu_to_le16(reason),
};
wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
}
int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason)
@ -2146,6 +2228,54 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
return rc;
}
int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
u8 token, u16 status, bool amsdu, u16 agg_wsize,
u16 timeout)
{
int rc;
struct wmi_rcp_addba_resp_edma_cmd cmd = {
.cid = cid,
.tid = tid,
.dialog_token = token,
.status_code = cpu_to_le16(status),
/* bit 0: A-MSDU supported
* bit 1: policy (should be 0 for us)
* bits 2..5: TID
* bits 6..15: buffer size
*/
.ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
(agg_wsize << 6)),
.ba_timeout = cpu_to_le16(timeout),
/* route all the connections to status ring 0 */
.status_ring_id = WIL_DEFAULT_RX_STATUS_RING_ID,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_rcp_addba_resp_sent_event evt;
} __packed reply = {
.evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
};
wil_dbg_wmi(wil,
"ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s, sring_id %d\n",
cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-",
WIL_DEFAULT_RX_STATUS_RING_ID);
rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_EDMA_CMDID, mid, &cmd,
sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.status) {
wil_err(wil, "ADDBA response failed with status %d\n",
le16_to_cpu(reply.evt.status));
rc = -EINVAL;
}
return rc;
}
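The ba_param_set packing above follows the IEEE 802.11 Block Ack Parameter Set layout spelled out in the comment. A small worked sketch (illustrative helper, not driver code): amsdu=1, tid=3, agg_wsize=64 yields 0x0001 | 0x000c | 0x1000 = 0x100d.

static u16 ba_param_set_pack_sketch(bool amsdu, u8 tid, u16 agg_wsize)
{
	return (amsdu ? 1 : 0) |	/* bit 0: A-MSDU supported */
	       (0 << 1) |		/* bit 1: BA policy (0 for us) */
	       (tid << 2) |		/* bits 2..5: TID */
	       (agg_wsize << 6);	/* bits 6..15: buffer size */
}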
int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
enum wmi_ps_profile_type ps_profile)
{
@ -2852,3 +2982,263 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
return rc;
}
int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id)
{
int rc;
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wil_status_ring *sring = &wil->srings[ring_id];
struct wmi_tx_status_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(sring->size),
},
.irq_index = WIL_TX_STATUS_IRQ_IDX
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_tx_status_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
cmd.ring_cfg.ring_id = ring_id;
cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
rc = wmi_call(wil, WMI_TX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_TX_STATUS_RING_CFG_DONE_EVENTID,
&reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
return 0;
}
int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, u16 max_rx_pl_per_desc)
{
struct net_device *ndev = wil->main_ndev;
struct wil6210_vif *vif = ndev_to_vif(ndev);
int rc;
struct wmi_cfg_def_rx_offload_cmd cmd = {
.max_msdu_size = cpu_to_le16(wil_mtu2macbuf(WIL_MAX_ETH_MTU)),
.max_rx_pl_per_desc = cpu_to_le16(max_rx_pl_per_desc),
.decap_trans_type = WMI_DECAP_TYPE_802_3,
.l2_802_3_offload_ctrl = 0,
.l3_l4_ctrl = 1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS,
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_cfg_def_rx_offload_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
rc = wmi_call(wil, WMI_CFG_DEF_RX_OFFLOAD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
return 0;
}
int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id)
{
struct net_device *ndev = wil->main_ndev;
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil_status_ring *sring = &wil->srings[ring_id];
int rc;
struct wmi_rx_status_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(sring->size),
.ring_id = ring_id,
},
.rx_msg_type = wil->use_compressed_rx_status ?
WMI_RX_MSG_TYPE_COMPRESSED :
WMI_RX_MSG_TYPE_EXTENDED,
.irq_index = WIL_RX_STATUS_IRQ_IDX,
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_rx_status_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
rc = wmi_call(wil, WMI_RX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_RX_STATUS_RING_CFG_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
return 0;
}
int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id)
{
struct net_device *ndev = wil->main_ndev;
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil_ring *ring = &wil->ring_rx;
int rc;
struct wmi_rx_desc_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(ring->size),
.ring_id = WIL_RX_DESC_RING_ID,
},
.status_ring_id = status_ring_id,
.irq_index = WIL_RX_STATUS_IRQ_IDX,
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_rx_desc_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa);
rc = wmi_call(wil, WMI_RX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_RX_DESC_RING_CFG_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
return 0;
}
int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int sring_id = wil->tx_sring_idx; /* there is only one TX sring */
int rc;
struct wil_ring *ring = &wil->ring_tx[ring_id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
struct wmi_tx_desc_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(ring->size),
.ring_id = ring_id,
},
.status_ring_id = sring_id,
.cid = cid,
.tid = tid,
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
.max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
.schd_params = {
.priority = cpu_to_le16(0),
.timeslot_us = cpu_to_le16(0xfff),
}
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_tx_desc_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
rc = wmi_call(wil, WMI_TX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
spin_lock_bh(&txdata->lock);
ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
txdata->mid = vif->mid;
txdata->enabled = 1;
spin_unlock_bh(&txdata->lock);
return 0;
}
int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id)
{
struct wil6210_priv *wil = vif_to_wil(vif);
struct wil_ring *ring = &wil->ring_tx[ring_id];
int rc;
struct wmi_bcast_desc_ring_add_cmd cmd = {
.ring_cfg = {
.ring_size = cpu_to_le16(ring->size),
.ring_id = ring_id,
},
.status_ring_id = wil->tx_sring_idx,
.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
};
struct {
struct wmi_cmd_hdr hdr;
struct wmi_rx_desc_ring_cfg_done_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
rc = wmi_call(wil, WMI_BCAST_DESC_RING_ADD_CMDID, vif->mid, &cmd,
sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_BCAST_DESC_RING_ADD_CMD failed, rc %d\n", rc);
return rc;
}
if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "Broadcast Tx config failed, status %d\n",
reply.evt.status);
return -EINVAL;
}
spin_lock_bh(&txdata->lock);
ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
txdata->mid = vif->mid;
txdata->enabled = 1;
spin_unlock_bh(&txdata->lock);
return 0;
}

View File

@ -86,6 +86,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_PNO = 15,
WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19,
WMI_FW_CAPABILITY_AMSDU = 23,
WMI_FW_CAPABILITY_MAX,
};
@ -148,8 +149,8 @@ enum wmi_command_id {
WMI_CFG_RX_CHAIN_CMDID = 0x820,
WMI_VRING_CFG_CMDID = 0x821,
WMI_BCAST_VRING_CFG_CMDID = 0x822,
WMI_VRING_BA_EN_CMDID = 0x823,
WMI_VRING_BA_DIS_CMDID = 0x824,
WMI_RING_BA_EN_CMDID = 0x823,
WMI_RING_BA_DIS_CMDID = 0x824,
WMI_RCP_ADDBA_RESP_CMDID = 0x825,
WMI_RCP_DELBA_CMDID = 0x826,
WMI_SET_SSID_CMDID = 0x827,
@ -163,6 +164,7 @@ enum wmi_command_id {
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
@ -235,6 +237,12 @@ enum wmi_command_id {
WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7,
WMI_BF_CONTROL_CMDID = 0x9AA,
WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0,
WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1,
WMI_TX_DESC_RING_ADD_CMDID = 0x9C2,
WMI_RX_DESC_RING_ADD_CMDID = 0x9C3,
WMI_BCAST_DESC_RING_ADD_CMDID = 0x9C4,
WMI_CFG_DEF_RX_OFFLOAD_CMDID = 0x9C5,
WMI_SCHEDULING_SCHEME_CMDID = 0xA01,
WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02,
WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03,
@ -781,18 +789,90 @@ struct wmi_lo_power_calib_from_otp_event {
u8 reserved[3];
} __packed;
/* WMI_VRING_BA_EN_CMDID */
struct wmi_vring_ba_en_cmd {
u8 ringid;
struct wmi_edma_ring_cfg {
__le64 ring_mem_base;
/* size in number of items */
__le16 ring_size;
u8 ring_id;
u8 reserved;
} __packed;
enum wmi_rx_msg_type {
WMI_RX_MSG_TYPE_COMPRESSED = 0x00,
WMI_RX_MSG_TYPE_EXTENDED = 0x01,
};
struct wmi_tx_status_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
u8 irq_index;
u8 reserved[3];
} __packed;
struct wmi_rx_status_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
u8 irq_index;
/* wmi_rx_msg_type */
u8 rx_msg_type;
u8 reserved[2];
} __packed;
struct wmi_cfg_def_rx_offload_cmd {
__le16 max_msdu_size;
__le16 max_rx_pl_per_desc;
u8 decap_trans_type;
u8 l2_802_3_offload_ctrl;
u8 l2_nwifi_offload_ctrl;
u8 vlan_id;
u8 nwifi_ds_trans_type;
u8 l3_l4_ctrl;
u8 reserved[6];
} __packed;
struct wmi_tx_desc_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
__le16 max_msdu_size;
/* Correlated status ring (0-63) */
u8 status_ring_id;
u8 cid;
u8 tid;
u8 encap_trans_type;
u8 mac_ctrl;
u8 to_resolution;
u8 agg_max_wsize;
u8 reserved[3];
struct wmi_vring_cfg_schd schd_params;
} __packed;
struct wmi_rx_desc_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
u8 irq_index;
/* 0-63 status rings */
u8 status_ring_id;
u8 reserved[2];
__le64 sw_tail_host_addr;
} __packed;
struct wmi_bcast_desc_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
__le16 max_msdu_size;
/* Correlated status ring (0-63) */
u8 status_ring_id;
u8 encap_trans_type;
u8 reserved[4];
} __packed;
/* WMI_RING_BA_EN_CMDID */
struct wmi_ring_ba_en_cmd {
u8 ring_id;
u8 agg_max_wsize;
__le16 ba_timeout;
u8 amsdu;
u8 reserved[3];
} __packed;
/* WMI_VRING_BA_DIS_CMDID */
struct wmi_vring_ba_dis_cmd {
u8 ringid;
/* WMI_RING_BA_DIS_CMDID */
struct wmi_ring_ba_dis_cmd {
u8 ring_id;
u8 reserved;
__le16 reason;
} __packed;
@ -950,6 +1030,21 @@ struct wmi_rcp_addba_resp_cmd {
u8 reserved[2];
} __packed;
/* WMI_RCP_ADDBA_RESP_EDMA_CMDID */
struct wmi_rcp_addba_resp_edma_cmd {
u8 cid;
u8 tid;
u8 dialog_token;
u8 reserved;
__le16 status_code;
/* ieee80211_ba_parameterset field to send */
__le16 ba_param_set;
__le16 ba_timeout;
u8 status_ring_id;
/* wmi_cfg_rx_chain_cmd_reorder_type */
u8 reorder_type;
} __packed;
/* WMI_RCP_DELBA_CMDID */
struct wmi_rcp_delba_cmd {
/* Used for cid less than 8. For higher cid set
@ -1535,7 +1630,7 @@ enum wmi_event_id {
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
WMI_VRING_EN_EVENTID = 0x1865,
WMI_RING_EN_EVENTID = 0x1865,
WMI_GET_RF_STATUS_EVENTID = 0x1866,
WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868,
@ -1587,6 +1682,11 @@ enum wmi_event_id {
WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7,
WMI_BF_CONTROL_EVENTID = 0x19AA,
WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0,
WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1,
WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2,
WMI_RX_DESC_RING_CFG_DONE_EVENTID = 0x19C3,
WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID = 0x19C5,
WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01,
WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02,
WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03,
@ -1997,6 +2097,49 @@ struct wmi_rcp_addba_resp_sent_event {
u8 reserved2[2];
} __packed;
/* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */
struct wmi_tx_status_ring_cfg_done_event {
u8 ring_id;
/* wmi_fw_status */
u8 status;
u8 reserved[2];
__le32 ring_tail_ptr;
} __packed;
/* WMI_RX_STATUS_RING_CFG_DONE_EVENTID */
struct wmi_rx_status_ring_cfg_done_event {
u8 ring_id;
/* wmi_fw_status */
u8 status;
u8 reserved[2];
__le32 ring_tail_ptr;
} __packed;
/* WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID */
struct wmi_cfg_def_rx_offload_done_event {
/* wmi_fw_status */
u8 status;
u8 reserved[3];
} __packed;
/* WMI_TX_DESC_RING_CFG_DONE_EVENTID */
struct wmi_tx_desc_ring_cfg_done_event {
u8 ring_id;
/* wmi_fw_status */
u8 status;
u8 reserved[2];
__le32 ring_tail_ptr;
} __packed;
/* WMI_RX_DESC_RING_CFG_DONE_EVENTID */
struct wmi_rx_desc_ring_cfg_done_event {
u8 ring_id;
/* wmi_fw_status */
u8 status;
u8 reserved[2];
__le32 ring_tail_ptr;
} __packed;
/* WMI_RCP_ADDBA_REQ_EVENTID */
struct wmi_rcp_addba_req_event {
/* Used for cid less than 8. For higher cid set
@ -2047,9 +2190,9 @@ struct wmi_data_port_open_event {
u8 reserved[3];
} __packed;
/* WMI_VRING_EN_EVENTID */
struct wmi_vring_en_event {
u8 vring_index;
/* WMI_RING_EN_EVENTID */
struct wmi_ring_en_event {
u8 ring_index;
u8 reserved[3];
} __packed;