wifi: mt76: add info parameter to rx_skb signature
This is a preliminary patch to introduce WED RX support for mt7915.

Tested-by: Daniel Golle <daniel@makrotopia.org>
Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 52546e2778
commit c313794277
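In the new contract, the DMA path (dma.c) hands each driver a pointer to the descriptor's info word via &info, while the SDIO and USB glue pass NULL because they carry no per-descriptor metadata; mt7915 then forwards the word into its RX path for the WED/PPE check added below. The sketch that follows shows how a driver-side rx_skb implementation might consume the new argument; example_queue_rx_skb is a hypothetical name and its body is illustrative, not taken from the patch.

static void example_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
				 struct sk_buff *skb, u32 *info)
{
	/* Only the DMA path supplies descriptor metadata; the SDIO/USB
	 * buses pass NULL, so never dereference info unconditionally. */
	u32 rx_info = info ? *info : 0;

	/* ... decode the RX descriptor and fill the mt76_rx_status in
	 * skb->cb; rx_info would carry the WED/PPE bits that
	 * mt7915_wed_check_ppe() consumes later in this series ... */
	(void)rx_info;

	/* hand the frame to the shared mt76 RX path / mac80211 */
	mt76_rx(mdev, q, skb);
}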
@@ -750,7 +750,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 
 static void
 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
-		  int len, bool more)
+		  int len, bool more, u32 info)
 {
 	struct sk_buff *skb = q->rx_head;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -770,7 +770,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
 	q->rx_head = NULL;
 	if (nr_frags < ARRAY_SIZE(shinfo->frags))
-		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
 	else
 		dev_kfree_skb(skb);
 }
@@ -822,7 +822,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		}
 
 		if (q->rx_head) {
-			mt76_add_fragment(dev, q, data, len, more);
+			mt76_add_fragment(dev, q, data, len, more, info);
 			continue;
 		}
 
@@ -846,7 +846,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 			continue;
 		}
 
-		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
 		continue;
 
 free_frag:
@@ -444,7 +444,7 @@ struct mt76_driver_ops {
 	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);
 
 	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
-		       struct sk_buff *skb);
+		       struct sk_buff *skb, u32 *info);
 
 	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
@@ -69,7 +69,7 @@ free:
 }
 
 void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			 struct sk_buff *skb)
+			 struct sk_buff *skb, u32 *info)
 {
 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
 	__le32 *rxd = (__le32 *)skb->data;
@@ -244,7 +244,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
 
 void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			 struct sk_buff *skb);
+			 struct sk_buff *skb, u32 *info);
 void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
 void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
 int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -1666,7 +1666,7 @@ bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
 EXPORT_SYMBOL_GPL(mt7615_rx_check);
 
 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			 struct sk_buff *skb)
+			 struct sk_buff *skb, u32 *info)
 {
 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
 	__le32 *rxd = (__le32 *)skb->data;
@@ -513,7 +513,7 @@ void mt7615_tx_worker(struct mt76_worker *w);
 void mt7615_tx_token_put(struct mt7615_dev *dev);
 bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			 struct sk_buff *skb);
+			 struct sk_buff *skb, u32 *info);
 void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
 int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta);
@@ -188,7 +188,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
 void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
 bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
 void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			  struct sk_buff *skb);
+			  struct sk_buff *skb, u32 *info);
 void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
 irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
 void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
@@ -33,7 +33,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
 EXPORT_SYMBOL_GPL(mt76x02_tx);
 
 void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			  struct sk_buff *skb)
+			  struct sk_buff *skb, u32 *info)
 {
 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
 	void *rxwi = skb->data;
@@ -165,9 +165,9 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
 		sta = container_of((void *)msta, struct ieee80211_sta,
 				   drv_priv);
 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-			u8 q = mt76_connac_lmac_mapping(i);
-			u32 tx_cur = tx_time[q];
-			u32 rx_cur = rx_time[q];
+			u8 queue = mt76_connac_lmac_mapping(i);
+			u32 tx_cur = tx_time[queue];
+			u32 rx_cur = rx_time[queue];
 			u8 tid = ac_to_tid[i];
 
 			if (!tx_cur && !rx_cur)
@@ -245,8 +245,37 @@ void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
 		mt76_clear(dev, addr, BIT(5));
 }
 
+static void
+mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
+		     struct mt7915_sta *msta, struct sk_buff *skb,
+		     u32 info)
+{
+	struct ieee80211_vif *vif;
+	struct wireless_dev *wdev;
+
+	if (!msta || !msta->vif)
+		return;
+
+	if (!(q->flags & MT_QFLAG_WED) ||
+	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX)
+		return;
+
+	if (!(info & MT_DMA_INFO_PPE_VLD))
+		return;
+
+	vif = container_of((void *)msta->vif, struct ieee80211_vif,
+			   drv_priv);
+	wdev = ieee80211_vif_to_wdev(vif);
+	skb->dev = wdev->netdev;
+
+	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
+				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
+				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
+}
+
 static int
-mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
+mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
+		   enum mt76_rxq_id q, u32 *info)
 {
 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
 	struct mt76_phy *mphy = &dev->mt76.phy;
@@ -513,6 +542,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
 		}
 	} else {
 		status->flag |= RX_FLAG_8023;
+		mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
+				     *info);
 	}
 
 	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
@@ -1096,7 +1127,7 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
 }
 
 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			 struct sk_buff *skb)
+			 struct sk_buff *skb, u32 *info)
 {
 	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
 	__le32 *rxd = (__le32 *)skb->data;
@@ -1130,7 +1161,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 		dev_kfree_skb(skb);
 		break;
 	case PKT_TYPE_NORMAL:
-		if (!mt7915_mac_fill_rx(dev, skb)) {
+		if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
 			mt76_rx(&dev->mt76, q, skb);
 			return;
 		}
@@ -617,7 +617,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 			  struct mt76_tx_info *tx_info);
 void mt7915_tx_token_put(struct mt7915_dev *dev);
 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			 struct sk_buff *skb);
+			 struct sk_buff *skb, u32 *info);
 bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
 void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
 void mt7915_stats_work(struct work_struct *work);
@@ -684,7 +684,7 @@ bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
 EXPORT_SYMBOL_GPL(mt7921_rx_check);
 
 void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			 struct sk_buff *skb)
+			 struct sk_buff *skb, u32 *info)
 {
 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
 	__le32 *rxd = (__le32 *)skb->data;
@@ -470,7 +470,7 @@ void mt7921_tx_worker(struct mt76_worker *w);
 void mt7921_tx_token_put(struct mt7921_dev *dev);
 bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len);
 void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
-			 struct sk_buff *skb);
+			 struct sk_buff *skb, u32 *info);
 void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
 void mt7921_stats_work(struct work_struct *work);
 void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
@@ -395,7 +395,7 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 		if (!e || !e->skb)
 			break;
 
-		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
+		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb, NULL);
 		e->skb = NULL;
 		nframes++;
 	}
@@ -547,7 +547,7 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 		len -= data_len;
 		nsgs++;
 	}
-	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
+	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 
 	return nsgs;
 }