/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
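
/* For reference: assuming HTT_RX_RING_SIZE_MAX is 2048 (its value in htt.h
 * at the time of writing), the two macros above evaluate to:
 *
 *   HTT_RX_RING_SIZE       = 2048
 *   HTT_RX_RING_FILL_LEVEL = (2048 / 2) - 1 = 1023
 *
 * i.e. the fill level is kept strictly below half the ring, which is the
 * invariant the BUILD_BUG_ON() in __ath10k_htt_rx_ring_fill_n() checks for
 * the full rx reorder firmware.
 */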
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
static int __ath10k_htt_rx_ring_fill_n ( struct ath10k_htt * htt , int num )
{
struct htt_rx_desc * rx_desc ;
struct ath10k_skb_rxcb * rxcb ;
struct sk_buff * skb ;
dma_addr_t paddr ;
int ret = 0 , idx ;
	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
while ( num > 0 ) {
skb = dev_alloc_skb ( HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN ) ;
if ( ! skb ) {
ret = - ENOMEM ;
goto fail ;
}
if ( ! IS_ALIGNED ( ( unsigned long ) skb - > data , HTT_RX_DESC_ALIGN ) )
skb_pull ( skb ,
PTR_ALIGN ( skb - > data , HTT_RX_DESC_ALIGN ) -
skb - > data ) ;
/* Clear rx_desc attention word before posting to Rx ring */
rx_desc = ( struct htt_rx_desc * ) skb - > data ;
rx_desc - > attention . flags = __cpu_to_le32 ( 0 ) ;
paddr = dma_map_single ( htt - > ar - > dev , skb - > data ,
skb - > len + skb_tailroom ( skb ) ,
DMA_FROM_DEVICE ) ;
if ( unlikely ( dma_mapping_error ( htt - > ar - > dev , paddr ) ) ) {
dev_kfree_skb_any ( skb ) ;
ret = - ENOMEM ;
goto fail ;
}
rxcb = ATH10K_SKB_RXCB ( skb ) ;
rxcb - > paddr = paddr ;
htt - > rx_ring . netbufs_ring [ idx ] = skb ;
htt - > rx_ring . paddrs_ring [ idx ] = __cpu_to_le32 ( paddr ) ;
htt - > rx_ring . fill_cnt + + ;
if ( htt - > rx_ring . in_ord_rx ) {
hash_add ( htt - > rx_ring . skb_table ,
& ATH10K_SKB_RXCB ( skb ) - > hlist ,
( u32 ) paddr ) ;
}
num - - ;
idx + + ;
idx & = htt - > rx_ring . size_mask ;
}
fail :
/*
* Make sure the rx buffer is updated before available buffer
* index to avoid any potential rx ring corruption .
*/
mb ( ) ;
* htt - > rx_ring . alloc_idx . vaddr = __cpu_to_le32 ( idx ) ;
return ret ;
}
static int ath10k_htt_rx_ring_fill_n ( struct ath10k_htt * htt , int num )
{
lockdep_assert_held ( & htt - > rx_ring . lock ) ;
return __ath10k_htt_rx_ring_fill_n ( htt , num ) ;
}
static void ath10k_htt_rx_msdu_buff_replenish ( struct ath10k_htt * htt )
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there's not enough buffers on RX ring FW will not
	 * report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
ret = ath10k_htt_rx_ring_fill_n ( htt , num_to_fill ) ;
if ( ret = = - ENOMEM ) {
/*
* Failed to fill it to the desired level -
* we ' ll start a timer and try again next time .
* As long as enough buffers are left in the ring for
* another A - MPDU rx , no special recovery is needed .
*/
mod_timer ( & htt - > rx_ring . refill_retry_timer , jiffies +
msecs_to_jiffies ( HTT_RX_RING_REFILL_RETRY_MS ) ) ;
} else if ( num_deficit > 0 ) {
tasklet_schedule ( & htt - > rx_replenish_task ) ;
}
spin_unlock_bh ( & htt - > rx_ring . lock ) ;
}
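
/* Worked example of the progressive refill above (the numbers are purely
 * illustrative): if ATH10K_HTT_MAX_NUM_REFILL were 16 and the ring were 40
 * buffers short of its fill level, the first pass would post 16 buffers,
 * leave a deficit of 24 and reschedule rx_replenish_task; two more tasklet
 * passes would then top the ring up, unless an -ENOMEM arms the
 * HTT_RX_RING_REFILL_RETRY_MS retry timer instead.
 */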
static void ath10k_htt_rx_ring_refill_retry ( unsigned long arg )
{
struct ath10k_htt * htt = ( struct ath10k_htt * ) arg ;
ath10k_htt_rx_msdu_buff_replenish ( htt ) ;
}
int ath10k_htt_rx_ring_refill ( struct ath10k * ar )
{
struct ath10k_htt * htt = & ar - > htt ;
int ret ;
spin_lock_bh ( & htt - > rx_ring . lock ) ;
ret = ath10k_htt_rx_ring_fill_n ( htt , ( htt - > rx_ring . fill_level -
htt - > rx_ring . fill_cnt ) ) ;
spin_unlock_bh ( & htt - > rx_ring . lock ) ;
if ( ret )
ath10k_htt_rx_ring_free ( htt ) ;
return ret ;
}
void ath10k_htt_rx_free ( struct ath10k_htt * htt )
{
del_timer_sync ( & htt - > rx_ring . refill_retry_timer ) ;
tasklet_kill ( & htt - > rx_replenish_task ) ;
tasklet_kill ( & htt - > txrx_compl_task ) ;
skb_queue_purge ( & htt - > tx_compl_q ) ;
skb_queue_purge ( & htt - > rx_compl_q ) ;
skb_queue_purge ( & htt - > rx_in_ord_compl_q ) ;
ath10k_htt_rx_ring_free ( htt ) ;
dma_free_coherent ( htt - > ar - > dev ,
( htt - > rx_ring . size *
sizeof ( htt - > rx_ring . paddrs_ring ) ) ,
htt - > rx_ring . paddrs_ring ,
htt - > rx_ring . base_paddr ) ;
dma_free_coherent ( htt - > ar - > dev ,
sizeof ( * htt - > rx_ring . alloc_idx . vaddr ) ,
htt - > rx_ring . alloc_idx . vaddr ,
htt - > rx_ring . alloc_idx . paddr ) ;
kfree ( htt - > rx_ring . netbufs_ring ) ;
}
static inline struct sk_buff * ath10k_htt_rx_netbuf_pop ( struct ath10k_htt * htt )
{
struct ath10k * ar = htt - > ar ;
int idx ;
struct sk_buff * msdu ;
lockdep_assert_held ( & htt - > rx_ring . lock ) ;
if ( htt - > rx_ring . fill_cnt = = 0 ) {
ath10k_warn ( ar , " tried to pop sk_buff from an empty rx ring \n " ) ;
return NULL ;
}
idx = htt - > rx_ring . sw_rd_idx . msdu_payld ;
msdu = htt - > rx_ring . netbufs_ring [ idx ] ;
htt - > rx_ring . netbufs_ring [ idx ] = NULL ;
htt - > rx_ring . paddrs_ring [ idx ] = 0 ;
idx + + ;
idx & = htt - > rx_ring . size_mask ;
htt - > rx_ring . sw_rd_idx . msdu_payld = idx ;
htt - > rx_ring . fill_cnt - - ;
dma_unmap_single ( htt - > ar - > dev ,
ATH10K_SKB_RXCB ( msdu ) - > paddr ,
msdu - > len + skb_tailroom ( msdu ) ,
DMA_FROM_DEVICE ) ;
ath10k_dbg_dump ( ar , ATH10K_DBG_HTT_DUMP , NULL , " htt rx netbuf pop: " ,
msdu - > data , msdu - > len + skb_tailroom ( msdu ) ) ;
return msdu ;
}
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop ( struct ath10k_htt * htt ,
u8 * * fw_desc , int * fw_desc_len ,
struct sk_buff_head * amsdu )
{
struct ath10k * ar = htt - > ar ;
int msdu_len , msdu_chaining = 0 ;
struct sk_buff * msdu ;
struct htt_rx_desc * rx_desc ;
lockdep_assert_held ( & htt - > rx_ring . lock ) ;
for ( ; ; ) {
int last_msdu , msdu_len_invalid , msdu_chained ;
msdu = ath10k_htt_rx_netbuf_pop ( htt ) ;
if ( ! msdu ) {
__skb_queue_purge ( amsdu ) ;
return - ENOENT ;
}
__skb_queue_tail ( amsdu , msdu ) ;
rx_desc = ( struct htt_rx_desc * ) msdu - > data ;
/* FIXME: we must report msdu payload since this is what caller
* expects now */
skb_put ( msdu , offsetof ( struct htt_rx_desc , msdu_payload ) ) ;
skb_pull ( msdu , offsetof ( struct htt_rx_desc , msdu_payload ) ) ;
/*
* Sanity check - confirm the HW is finished filling in the
* rx data .
* If the HW and SW are working correctly , then it ' s guaranteed
* that the HW ' s MAC DMA is done before this point in the SW .
* To prevent the case that we handle a stale Rx descriptor ,
* just assert for now until we have a way to recover .
*/
if ( ! ( __le32_to_cpu ( rx_desc - > attention . flags )
& RX_ATTENTION_FLAGS_MSDU_DONE ) ) {
__skb_queue_purge ( amsdu ) ;
return - EIO ;
}
/*
* Copy the FW rx descriptor for this MSDU from the rx
* indication message into the MSDU ' s netbuf . HL uses the
* same rx indication message definition as LL , and simply
* appends new info ( fields from the HW rx desc , and the
* MSDU payload itself ) . So , the offset into the rx
* indication message only has to account for the standard
* offset of the per - MSDU FW rx desc info within the
* message , and how many bytes of the per - MSDU FW rx desc
* info have already been consumed . ( And the endianness of
* the host , since for a big - endian host , the rx ind
* message contents , including the per - MSDU rx desc bytes ,
* were byteswapped during upload . )
*/
if ( * fw_desc_len > 0 ) {
rx_desc - > fw_desc . info0 = * * fw_desc ;
/*
* The target is expected to only provide the basic
* per - MSDU rx descriptors . Just to be sure , verify
* that the target has not attached extension data
* ( e . g . LRO flow ID ) .
*/
/* or more, if there's extension data */
( * fw_desc ) + + ;
( * fw_desc_len ) - - ;
} else {
			/*
			 * When an oversized A-MSDU happens the FW may lose
			 * some of the MSDU status - in this case the FW
			 * descriptors provided will be less than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that it will still deliver to
			 * upper stack, if no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs in the end of this A-MSDU instead of the
			 * beginning.
			 */
rx_desc - > fw_desc . info0 = 0 ;
}
msdu_len_invalid = ! ! ( __le32_to_cpu ( rx_desc - > attention . flags )
& ( RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR ) ) ;
msdu_len = MS ( __le32_to_cpu ( rx_desc - > msdu_start . common . info0 ) ,
RX_MSDU_START_INFO0_MSDU_LENGTH ) ;
msdu_chained = rx_desc - > frag_info . ring2_more_count ;
if ( msdu_len_invalid )
msdu_len = 0 ;
skb_trim ( msdu , 0 ) ;
skb_put ( msdu , min ( msdu_len , HTT_RX_MSDU_SIZE ) ) ;
msdu_len - = msdu - > len ;
/* Note: Chained buffers do not contain rx descriptor */
while ( msdu_chained - - ) {
msdu = ath10k_htt_rx_netbuf_pop ( htt ) ;
if ( ! msdu ) {
__skb_queue_purge ( amsdu ) ;
return - ENOENT ;
}
__skb_queue_tail ( amsdu , msdu ) ;
skb_trim ( msdu , 0 ) ;
skb_put ( msdu , min ( msdu_len , HTT_RX_BUF_SIZE ) ) ;
msdu_len - = msdu - > len ;
msdu_chaining = 1 ;
}
last_msdu = __le32_to_cpu ( rx_desc - > msdu_end . common . info0 ) &
RX_MSDU_END_INFO0_LAST_MSDU ;
trace_ath10k_htt_rx_desc ( ar , & rx_desc - > attention ,
sizeof ( * rx_desc ) - sizeof ( u32 ) ) ;
if ( last_msdu )
break ;
}
if ( skb_queue_empty ( amsdu ) )
msdu_chaining = - 1 ;
/*
* Don ' t refill the ring yet .
*
* First , the elements popped here are still in use - it is not
* safe to overwrite them until the matching call to
* mpdu_desc_list_next . Second , for efficiency it is preferable to
* refill the rx ring with 1 PPDU ' s worth of rx buffers ( something
* like 32 x 3 buffers ) , rather than one MPDU ' s worth of rx buffers
* ( something like 3 buffers ) . Consequently , we ' ll rely on the txrx
* SW to tell us when it is done pulling all the PPDU ' s rx buffers
* out of the rx ring , and then refill it just once .
*/
return msdu_chaining ;
}
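
#if 0
/* Illustrative sketch only (kept out of the build): how a caller is meant to
 * consume the return value documented above. This mirrors the real handling
 * in ath10k_htt_rx_handler() further down: negative values are fatal for the
 * rx ring, while ret > 0 flags chained MSDUs that need
 * ath10k_htt_rx_h_unchain() before any further processing.
 */
static void example_amsdu_pop_usage(struct ath10k_htt *htt,
				    u8 **fw_desc, int *fw_desc_len)
{
	struct sk_buff_head amsdu;
	int ret;

	__skb_queue_head_init(&amsdu);

	ret = ath10k_htt_rx_amsdu_pop(htt, fw_desc, fw_desc_len, &amsdu);
	if (ret < 0) {
		/* rx ring is no longer coherent; nothing can be salvaged */
		__skb_queue_purge(&amsdu);
		return;
	}

	/* ret == 1: at least one MSDU spans several rx buffers */
	ath10k_htt_rx_h_unchain(htt->ar, &amsdu, ret > 0);
}
#endif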
static void ath10k_htt_rx_replenish_task ( unsigned long ptr )
{
struct ath10k_htt * htt = ( struct ath10k_htt * ) ptr ;
ath10k_htt_rx_msdu_buff_replenish ( htt ) ;
}
static struct sk_buff * ath10k_htt_rx_pop_paddr ( struct ath10k_htt * htt ,
u32 paddr )
{
struct ath10k * ar = htt - > ar ;
struct ath10k_skb_rxcb * rxcb ;
struct sk_buff * msdu ;
lockdep_assert_held ( & htt - > rx_ring . lock ) ;
msdu = ath10k_htt_rx_find_skb_paddr ( ar , paddr ) ;
if ( ! msdu )
return NULL ;
rxcb = ATH10K_SKB_RXCB ( msdu ) ;
hash_del ( & rxcb - > hlist ) ;
htt - > rx_ring . fill_cnt - - ;
dma_unmap_single ( htt - > ar - > dev , rxcb - > paddr ,
msdu - > len + skb_tailroom ( msdu ) ,
DMA_FROM_DEVICE ) ;
ath10k_dbg_dump ( ar , ATH10K_DBG_HTT_DUMP , NULL , " htt rx netbuf pop: " ,
msdu - > data , msdu - > len + skb_tailroom ( msdu ) ) ;
return msdu ;
}
static int ath10k_htt_rx_pop_paddr_list ( struct ath10k_htt * htt ,
struct htt_rx_in_ord_ind * ev ,
struct sk_buff_head * list )
{
struct ath10k * ar = htt - > ar ;
struct htt_rx_in_ord_msdu_desc * msdu_desc = ev - > msdu_descs ;
struct htt_rx_desc * rxd ;
struct sk_buff * msdu ;
int msdu_count ;
bool is_offload ;
u32 paddr ;
lockdep_assert_held ( & htt - > rx_ring . lock ) ;
msdu_count = __le16_to_cpu ( ev - > msdu_count ) ;
is_offload = ! ! ( ev - > info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK ) ;
while ( msdu_count - - ) {
paddr = __le32_to_cpu ( msdu_desc - > msdu_paddr ) ;
msdu = ath10k_htt_rx_pop_paddr ( htt , paddr ) ;
if ( ! msdu ) {
__skb_queue_purge ( list ) ;
return - ENOENT ;
}
__skb_queue_tail ( list , msdu ) ;
if ( ! is_offload ) {
rxd = ( void * ) msdu - > data ;
trace_ath10k_htt_rx_desc ( ar , rxd , sizeof ( * rxd ) ) ;
skb_put ( msdu , sizeof ( * rxd ) ) ;
skb_pull ( msdu , sizeof ( * rxd ) ) ;
skb_put ( msdu , __le16_to_cpu ( msdu_desc - > msdu_len ) ) ;
if ( ! ( __le32_to_cpu ( rxd - > attention . flags ) &
RX_ATTENTION_FLAGS_MSDU_DONE ) ) {
ath10k_warn ( htt - > ar , " tried to pop an incomplete frame, oops! \n " ) ;
return - EIO ;
}
}
msdu_desc + + ;
}
return 0 ;
}
int ath10k_htt_rx_alloc ( struct ath10k_htt * htt )
{
struct ath10k * ar = htt - > ar ;
dma_addr_t paddr ;
void * vaddr ;
size_t size ;
struct timer_list * timer = & htt - > rx_ring . refill_retry_timer ;
htt - > rx_confused = false ;
/* XXX: The fill level could be changed during runtime in response to
* the host processing latency . Is this really worth it ?
*/
htt - > rx_ring . size = HTT_RX_RING_SIZE ;
htt - > rx_ring . size_mask = htt - > rx_ring . size - 1 ;
htt - > rx_ring . fill_level = HTT_RX_RING_FILL_LEVEL ;
if ( ! is_power_of_2 ( htt - > rx_ring . size ) ) {
ath10k_warn ( ar , " htt rx ring size is not power of 2 \n " ) ;
return - EINVAL ;
}
htt - > rx_ring . netbufs_ring =
kzalloc ( htt - > rx_ring . size * sizeof ( struct sk_buff * ) ,
GFP_KERNEL ) ;
if ( ! htt - > rx_ring . netbufs_ring )
goto err_netbuf ;
size = htt - > rx_ring . size * sizeof ( htt - > rx_ring . paddrs_ring ) ;
vaddr = dma_alloc_coherent ( htt - > ar - > dev , size , & paddr , GFP_DMA ) ;
if ( ! vaddr )
goto err_dma_ring ;
htt - > rx_ring . paddrs_ring = vaddr ;
htt - > rx_ring . base_paddr = paddr ;
vaddr = dma_alloc_coherent ( htt - > ar - > dev ,
sizeof ( * htt - > rx_ring . alloc_idx . vaddr ) ,
& paddr , GFP_DMA ) ;
if ( ! vaddr )
goto err_dma_idx ;
htt - > rx_ring . alloc_idx . vaddr = vaddr ;
htt - > rx_ring . alloc_idx . paddr = paddr ;
htt - > rx_ring . sw_rd_idx . msdu_payld = htt - > rx_ring . size_mask ;
* htt - > rx_ring . alloc_idx . vaddr = 0 ;
/* Initialize the Rx refill retry timer */
setup_timer ( timer , ath10k_htt_rx_ring_refill_retry , ( unsigned long ) htt ) ;
spin_lock_init ( & htt - > rx_ring . lock ) ;
htt - > rx_ring . fill_cnt = 0 ;
htt - > rx_ring . sw_rd_idx . msdu_payld = 0 ;
hash_init ( htt - > rx_ring . skb_table ) ;
tasklet_init ( & htt - > rx_replenish_task , ath10k_htt_rx_replenish_task ,
( unsigned long ) htt ) ;
skb_queue_head_init ( & htt - > tx_compl_q ) ;
skb_queue_head_init ( & htt - > rx_compl_q ) ;
skb_queue_head_init ( & htt - > rx_in_ord_compl_q ) ;
tasklet_init ( & htt - > txrx_compl_task , ath10k_htt_txrx_compl_task ,
( unsigned long ) htt ) ;
ath10k_dbg ( ar , ATH10K_DBG_BOOT , " htt rx ring size %d fill_level %d \n " ,
htt - > rx_ring . size , htt - > rx_ring . fill_level ) ;
return 0 ;
err_dma_idx :
dma_free_coherent ( htt - > ar - > dev ,
( htt - > rx_ring . size *
sizeof ( htt - > rx_ring . paddrs_ring ) ) ,
htt - > rx_ring . paddrs_ring ,
htt - > rx_ring . base_paddr ) ;
err_dma_ring :
kfree ( htt - > rx_ring . netbufs_ring ) ;
err_netbuf :
return - ENOMEM ;
}
static int ath10k_htt_rx_crypto_param_len ( struct ath10k * ar ,
enum htt_rx_mpdu_encrypt_type type )
{
switch ( type ) {
case HTT_RX_MPDU_ENCRYPT_NONE :
return 0 ;
case HTT_RX_MPDU_ENCRYPT_WEP40 :
case HTT_RX_MPDU_ENCRYPT_WEP104 :
return IEEE80211_WEP_IV_LEN ;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC :
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA :
return IEEE80211_TKIP_IV_LEN ;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 :
return IEEE80211_CCMP_HDR_LEN ;
case HTT_RX_MPDU_ENCRYPT_WEP128 :
case HTT_RX_MPDU_ENCRYPT_WAPI :
break ;
}
ath10k_warn ( ar , " unsupported encryption type %d \n " , type ) ;
return 0 ;
}
# define MICHAEL_MIC_LEN 8
static int ath10k_htt_rx_crypto_tail_len ( struct ath10k * ar ,
enum htt_rx_mpdu_encrypt_type type )
{
switch ( type ) {
case HTT_RX_MPDU_ENCRYPT_NONE :
return 0 ;
case HTT_RX_MPDU_ENCRYPT_WEP40 :
case HTT_RX_MPDU_ENCRYPT_WEP104 :
return IEEE80211_WEP_ICV_LEN ;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC :
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA :
return IEEE80211_TKIP_ICV_LEN ;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 :
return IEEE80211_CCMP_MIC_LEN ;
case HTT_RX_MPDU_ENCRYPT_WEP128 :
case HTT_RX_MPDU_ENCRYPT_WAPI :
break ;
}
ath10k_warn ( ar , " unsupported encryption type %d \n " , type ) ;
return 0 ;
}
struct amsdu_subframe_hdr {
u8 dst [ ETH_ALEN ] ;
u8 src [ ETH_ALEN ] ;
__be16 len ;
} __packed ;
# define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
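
/* The 6-bit Group ID comes from VHT-SIG-A1: values 0 and 63 denote SU
 * transmissions (addressed to an AP and to a non-AP STA respectively),
 * while 1-62 indicate MU-MIMO, where MCS/Nss cannot be recovered from the
 * rx descriptor alone - see the VHT branch in ath10k_htt_rx_h_rates().
 */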
static void ath10k_htt_rx_h_rates ( struct ath10k * ar ,
struct ieee80211_rx_status * status ,
struct htt_rx_desc * rxd )
{
struct ieee80211_supported_band * sband ;
u8 cck , rate , bw , sgi , mcs , nss ;
u8 preamble = 0 ;
u8 group_id ;
u32 info1 , info2 , info3 ;
info1 = __le32_to_cpu ( rxd - > ppdu_start . info1 ) ;
info2 = __le32_to_cpu ( rxd - > ppdu_start . info2 ) ;
info3 = __le32_to_cpu ( rxd - > ppdu_start . info3 ) ;
preamble = MS ( info1 , RX_PPDU_START_INFO1_PREAMBLE_TYPE ) ;
switch ( preamble ) {
case HTT_RX_LEGACY :
/* To get legacy rate index band is required. Since band can't
* be undefined check if freq is non - zero .
*/
if ( ! status - > freq )
return ;
cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT ;
rate = MS ( info1 , RX_PPDU_START_INFO1_L_SIG_RATE ) ;
rate & = ~ RX_PPDU_START_RATE_FLAG ;
sband = & ar - > mac . sbands [ status - > band ] ;
status - > rate_idx = ath10k_mac_hw_rate_to_idx ( sband , rate ) ;
break ;
case HTT_RX_HT :
case HTT_RX_HT_WITH_TXBF :
/* HT-SIG - Table 20-11 in info2 and info3 */
mcs = info2 & 0x1F ;
nss = mcs > > 3 ;
bw = ( info2 > > 7 ) & 1 ;
sgi = ( info3 > > 7 ) & 1 ;
status - > rate_idx = mcs ;
status - > flag | = RX_FLAG_HT ;
if ( sgi )
status - > flag | = RX_FLAG_SHORT_GI ;
if ( bw )
status - > flag | = RX_FLAG_40MHZ ;
break ;
case HTT_RX_VHT :
case HTT_RX_VHT_WITH_TXBF :
/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
TODO check this */
bw = info2 & 3 ;
sgi = info3 & 1 ;
group_id = ( info2 > > 4 ) & 0x3F ;
if ( GROUP_ID_IS_SU_MIMO ( group_id ) ) {
mcs = ( info3 > > 4 ) & 0x0F ;
nss = ( ( info2 > > 10 ) & 0x07 ) + 1 ;
} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
mcs = 0 ;
nss = 1 ;
}
if ( mcs > 0x09 ) {
ath10k_warn ( ar , " invalid MCS received %u \n " , mcs ) ;
ath10k_warn ( ar , " rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x \n " ,
__le32_to_cpu ( rxd - > attention . flags ) ,
__le32_to_cpu ( rxd - > mpdu_start . info0 ) ,
__le32_to_cpu ( rxd - > mpdu_start . info1 ) ,
__le32_to_cpu ( rxd - > msdu_start . common . info0 ) ,
__le32_to_cpu ( rxd - > msdu_start . common . info1 ) ,
rxd - > ppdu_start . info0 ,
__le32_to_cpu ( rxd - > ppdu_start . info1 ) ,
__le32_to_cpu ( rxd - > ppdu_start . info2 ) ,
__le32_to_cpu ( rxd - > ppdu_start . info3 ) ,
__le32_to_cpu ( rxd - > ppdu_start . info4 ) ) ;
ath10k_warn ( ar , " msdu end %08x mpdu end %08x \n " ,
__le32_to_cpu ( rxd - > msdu_end . common . info0 ) ,
__le32_to_cpu ( rxd - > mpdu_end . info0 ) ) ;
ath10k_dbg_dump ( ar , ATH10K_DBG_HTT_DUMP , NULL ,
" rx desc msdu payload: " ,
rxd - > msdu_payload , 50 ) ;
}
status - > rate_idx = mcs ;
status - > vht_nss = nss ;
if ( sgi )
status - > flag | = RX_FLAG_SHORT_GI ;
switch ( bw ) {
/* 20MHZ */
case 0 :
break ;
/* 40MHZ */
case 1 :
status - > flag | = RX_FLAG_40MHZ ;
break ;
/* 80MHZ */
case 2 :
status - > vht_flag | = RX_VHT_FLAG_80MHZ ;
}
status - > flag | = RX_FLAG_VHT ;
break ;
default :
break ;
}
}
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel ( struct ath10k * ar , struct htt_rx_desc * rxd )
{
struct ath10k_peer * peer ;
struct ath10k_vif * arvif ;
struct cfg80211_chan_def def ;
u16 peer_id ;
lockdep_assert_held ( & ar - > data_lock ) ;
if ( ! rxd )
return NULL ;
if ( rxd - > attention . flags &
__cpu_to_le32 ( RX_ATTENTION_FLAGS_PEER_IDX_INVALID ) )
return NULL ;
if ( ! ( rxd - > msdu_end . common . info0 &
__cpu_to_le32 ( RX_MSDU_END_INFO0_FIRST_MSDU ) ) )
return NULL ;
peer_id = MS ( __le32_to_cpu ( rxd - > mpdu_start . info0 ) ,
RX_MPDU_START_INFO0_PEER_IDX ) ;
peer = ath10k_peer_find_by_id ( ar , peer_id ) ;
if ( ! peer )
return NULL ;
arvif = ath10k_get_arvif ( ar , peer - > vdev_id ) ;
if ( WARN_ON_ONCE ( ! arvif ) )
return NULL ;
if ( WARN_ON ( ath10k_mac_vif_chan ( arvif - > vif , & def ) ) )
return NULL ;
return def . chan ;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel ( struct ath10k * ar , u32 vdev_id )
{
struct ath10k_vif * arvif ;
struct cfg80211_chan_def def ;
lockdep_assert_held ( & ar - > data_lock ) ;
list_for_each_entry ( arvif , & ar - > arvifs , list ) {
if ( arvif - > vdev_id = = vdev_id & &
ath10k_mac_vif_chan ( arvif - > vif , & def ) = = 0 )
return def . chan ;
}
return NULL ;
}
static void
ath10k_htt_rx_h_any_chan_iter ( struct ieee80211_hw * hw ,
struct ieee80211_chanctx_conf * conf ,
void * data )
{
struct cfg80211_chan_def * def = data ;
* def = conf - > def ;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel ( struct ath10k * ar )
{
struct cfg80211_chan_def def = { } ;
ieee80211_iter_chan_contexts_atomic ( ar - > hw ,
ath10k_htt_rx_h_any_chan_iter ,
& def ) ;
return def . chan ;
}
static bool ath10k_htt_rx_h_channel ( struct ath10k * ar ,
struct ieee80211_rx_status * status ,
struct htt_rx_desc * rxd ,
u32 vdev_id )
{
struct ieee80211_channel * ch ;
spin_lock_bh ( & ar - > data_lock ) ;
ch = ar - > scan_channel ;
if ( ! ch )
ch = ar - > rx_channel ;
if ( ! ch )
ch = ath10k_htt_rx_h_peer_channel ( ar , rxd ) ;
if ( ! ch )
ch = ath10k_htt_rx_h_vdev_channel ( ar , vdev_id ) ;
if ( ! ch )
ch = ath10k_htt_rx_h_any_channel ( ar ) ;
spin_unlock_bh ( & ar - > data_lock ) ;
if ( ! ch )
return false ;
status - > band = ch - > band ;
status - > freq = ch - > center_freq ;
return true ;
}
static void ath10k_htt_rx_h_signal ( struct ath10k * ar ,
struct ieee80211_rx_status * status ,
struct htt_rx_desc * rxd )
{
/* FIXME: Get real NF */
status - > signal = ATH10K_DEFAULT_NOISE_FLOOR +
rxd - > ppdu_start . rssi_comb ;
status - > flag & = ~ RX_FLAG_NO_SIGNAL_VAL ;
}
static void ath10k_htt_rx_h_mactime ( struct ath10k * ar ,
struct ieee80211_rx_status * status ,
struct htt_rx_desc * rxd )
{
/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
* means all prior MSDUs in a PPDU are reported to mac80211 without the
* TSF . Is it worth holding frames until end of PPDU is known ?
*
* FIXME : Can we get / compute 64 bit TSF ?
*/
status - > mactime = __le32_to_cpu ( rxd - > ppdu_end . common . tsf_timestamp ) ;
status - > flag | = RX_FLAG_MACTIME_END ;
}
static void ath10k_htt_rx_h_ppdu ( struct ath10k * ar ,
struct sk_buff_head * amsdu ,
struct ieee80211_rx_status * status ,
u32 vdev_id )
{
struct sk_buff * first ;
struct htt_rx_desc * rxd ;
bool is_first_ppdu ;
bool is_last_ppdu ;
if ( skb_queue_empty ( amsdu ) )
return ;
first = skb_peek ( amsdu ) ;
rxd = ( void * ) first - > data - sizeof ( * rxd ) ;
is_first_ppdu = ! ! ( rxd - > attention . flags &
__cpu_to_le32 ( RX_ATTENTION_FLAGS_FIRST_MPDU ) ) ;
is_last_ppdu = ! ! ( rxd - > attention . flags &
__cpu_to_le32 ( RX_ATTENTION_FLAGS_LAST_MPDU ) ) ;
if ( is_first_ppdu ) {
/* New PPDU starts so clear out the old per-PPDU status. */
status - > freq = 0 ;
status - > rate_idx = 0 ;
status - > vht_nss = 0 ;
status - > vht_flag & = ~ RX_VHT_FLAG_80MHZ ;
status - > flag & = ~ ( RX_FLAG_HT |
RX_FLAG_VHT |
RX_FLAG_SHORT_GI |
RX_FLAG_40MHZ |
RX_FLAG_MACTIME_END ) ;
status - > flag | = RX_FLAG_NO_SIGNAL_VAL ;
ath10k_htt_rx_h_signal ( ar , status , rxd ) ;
ath10k_htt_rx_h_channel ( ar , status , rxd , vdev_id ) ;
ath10k_htt_rx_h_rates ( ar , status , rxd ) ;
}
if ( is_last_ppdu )
ath10k_htt_rx_h_mactime ( ar , status , rxd ) ;
}
static const char * const tid_to_ac [ ] = {
" BE " ,
" BK " ,
" BK " ,
" BE " ,
" VI " ,
" VI " ,
" VO " ,
" VO " ,
} ;
static char * ath10k_get_tid ( struct ieee80211_hdr * hdr , char * out , size_t size )
{
u8 * qc ;
int tid ;
if ( ! ieee80211_is_data_qos ( hdr - > frame_control ) )
return " " ;
qc = ieee80211_get_qos_ctl ( hdr ) ;
tid = * qc & IEEE80211_QOS_CTL_TID_MASK ;
if ( tid < 8 )
snprintf ( out , size , " tid %d (%s) " , tid , tid_to_ac [ tid ] ) ;
else
snprintf ( out , size , " tid %d " , tid ) ;
return out ;
}
static void ath10k_process_rx ( struct ath10k * ar ,
struct ieee80211_rx_status * rx_status ,
struct sk_buff * skb )
{
struct ieee80211_rx_status * status ;
struct ieee80211_hdr * hdr = ( struct ieee80211_hdr * ) skb - > data ;
char tid [ 32 ] ;
status = IEEE80211_SKB_RXCB ( skb ) ;
* status = * rx_status ;
ath10k_dbg ( ar , ATH10K_DBG_DATA ,
" rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i \n " ,
skb ,
skb - > len ,
ieee80211_get_SA ( hdr ) ,
ath10k_get_tid ( hdr , tid , sizeof ( tid ) ) ,
is_multicast_ether_addr ( ieee80211_get_DA ( hdr ) ) ?
" mcast " : " ucast " ,
( __le16_to_cpu ( hdr - > seq_ctrl ) & IEEE80211_SCTL_SEQ ) > > 4 ,
status - > flag = = 0 ? " legacy " : " " ,
status - > flag & RX_FLAG_HT ? " ht " : " " ,
status - > flag & RX_FLAG_VHT ? " vht " : " " ,
status - > flag & RX_FLAG_40MHZ ? " 40 " : " " ,
status - > vht_flag & RX_VHT_FLAG_80MHZ ? " 80 " : " " ,
status - > flag & RX_FLAG_SHORT_GI ? " sgi " : " " ,
status - > rate_idx ,
status - > vht_nss ,
status - > freq ,
status - > band , status - > flag ,
! ! ( status - > flag & RX_FLAG_FAILED_FCS_CRC ) ,
! ! ( status - > flag & RX_FLAG_MMIC_ERROR ) ,
! ! ( status - > flag & RX_FLAG_AMSDU_MORE ) ) ;
ath10k_dbg_dump ( ar , ATH10K_DBG_HTT_DUMP , NULL , " rx skb: " ,
skb - > data , skb - > len ) ;
trace_ath10k_rx_hdr ( ar , skb - > data , skb - > len ) ;
trace_ath10k_rx_payload ( ar , skb - > data , skb - > len ) ;
ieee80211_rx ( ar - > hw , skb ) ;
}
static int ath10k_htt_rx_nwifi_hdrlen ( struct ath10k * ar ,
struct ieee80211_hdr * hdr )
{
int len = ieee80211_hdrlen ( hdr - > frame_control ) ;
if ( ! test_bit ( ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING ,
ar - > fw_features ) )
len = round_up ( len , 4 ) ;
return len ;
}
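
/* Example of the padding handled above (illustrative): a 4-address nwifi
 * header is 30 bytes, so firmware that does not advertise
 * ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING pads it to
 * round_up(30, 4) = 32 bytes and the two pad bytes are skipped together
 * with the header.
 */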
static void ath10k_htt_rx_h_undecap_raw ( struct ath10k * ar ,
struct sk_buff * msdu ,
struct ieee80211_rx_status * status ,
enum htt_rx_mpdu_encrypt_type enctype ,
bool is_decrypted )
{
struct ieee80211_hdr * hdr ;
struct htt_rx_desc * rxd ;
size_t hdr_len ;
size_t crypto_len ;
bool is_first ;
bool is_last ;
rxd = ( void * ) msdu - > data - sizeof ( * rxd ) ;
is_first = ! ! ( rxd - > msdu_end . common . info0 &
2014-11-18 09:24:48 +02:00
__cpu_to_le32 ( RX_MSDU_END_INFO0_FIRST_MSDU ) ) ;
is_last = ! ! ( rxd - > msdu_end . common . info0 &
__cpu_to_le32 ( RX_MSDU_END_INFO0_LAST_MSDU ) ) ;
/* Delivered decapped frame:
* [ 802.11 header ]
* [ crypto param ] < - - can be trimmed if ! fcs_err & &
* ! decrypt_err & & ! peer_idx_invalid
* [ amsdu header ] < - - only if A - MSDU
* [ rfc1042 / llc ]
* [ payload ]
* [ FCS ] < - - at end , needs to be trimmed
*/
/* This probably shouldn't happen but warn just in case */
if ( unlikely ( WARN_ON_ONCE ( ! is_first ) ) )
return ;
/* This probably shouldn't happen but warn just in case */
if ( unlikely ( WARN_ON_ONCE ( ! ( is_first & & is_last ) ) ) )
return ;
skb_trim ( msdu , msdu - > len - FCS_LEN ) ;
/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));

	/* MMIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	/* Head */
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	memmove((void *)msdu->data + crypto_len,
		(void *)msdu->data, hdr_len);
	skb_pull(msdu, crypto_len);
}
static void ath10k_htt_rx_h_undecap_nwifi ( struct ath10k * ar ,
struct sk_buff * msdu ,
struct ieee80211_rx_status * status ,
const u8 first_hdr [ 64 ] )
{
struct ieee80211_hdr * hdr ;
size_t hdr_len ;
u8 da [ ETH_ALEN ] ;
u8 sa [ ETH_ALEN ] ;
/* Delivered decapped frame:
* [ nwifi 802.11 header ] < - - replaced with 802.11 hdr
* [ rfc1042 / llc ]
*
* Note : The nwifi header doesn ' t have QoS Control and is
* ( always ? ) a 3 addr frame .
*
* Note2 : There ' s no A - MSDU subframe header . Even if it ' s part
* of an A - MSDU .
*/
/* pull decapped header and copy SA & DA */
hdr = ( struct ieee80211_hdr * ) msdu - > data ;
hdr_len = ath10k_htt_rx_nwifi_hdrlen ( ar , hdr ) ;
ether_addr_copy ( da , ieee80211_get_DA ( hdr ) ) ;
ether_addr_copy ( sa , ieee80211_get_SA ( hdr ) ) ;
skb_pull ( msdu , hdr_len ) ;
/* push original 802.11 header */
hdr = ( struct ieee80211_hdr * ) first_hdr ;
hdr_len = ieee80211_hdrlen ( hdr - > frame_control ) ;
memcpy ( skb_push ( msdu , hdr_len ) , hdr , hdr_len ) ;
/* original 802.11 header has a different DA and in
* case of 4 addr it may also have different SA
*/
hdr = ( struct ieee80211_hdr * ) msdu - > data ;
ether_addr_copy ( ieee80211_get_DA ( hdr ) , da ) ;
ether_addr_copy ( ieee80211_get_SA ( hdr ) , sa ) ;
}
static void * ath10k_htt_rx_h_find_rfc1042 ( struct ath10k * ar ,
struct sk_buff * msdu ,
enum htt_rx_mpdu_encrypt_type enctype )
{
struct ieee80211_hdr * hdr ;
struct htt_rx_desc * rxd ;
size_t hdr_len , crypto_len ;
void * rfc1042 ;
bool is_first , is_last , is_amsdu ;
rxd = ( void * ) msdu - > data - sizeof ( * rxd ) ;
hdr = ( void * ) rxd - > rx_hdr_status ;
is_first = ! ! ( rxd - > msdu_end . common . info0 &
2014-11-18 09:24:48 +02:00
__cpu_to_le32 ( RX_MSDU_END_INFO0_FIRST_MSDU ) ) ;
is_last = ! ! ( rxd - > msdu_end . common . info0 &
__cpu_to_le32 ( RX_MSDU_END_INFO0_LAST_MSDU ) ) ;
is_amsdu = ! ( is_first & & is_last ) ;
rfc1042 = hdr ;
if ( is_first ) {
hdr_len = ieee80211_hdrlen ( hdr - > frame_control ) ;
crypto_len = ath10k_htt_rx_crypto_param_len ( ar , enctype ) ;
rfc1042 + = round_up ( hdr_len , 4 ) +
round_up ( crypto_len , 4 ) ;
}
if ( is_amsdu )
rfc1042 + = sizeof ( struct amsdu_subframe_hdr ) ;
return rfc1042 ;
}
static void ath10k_htt_rx_h_undecap_eth ( struct ath10k * ar ,
struct sk_buff * msdu ,
struct ieee80211_rx_status * status ,
const u8 first_hdr [ 64 ] ,
enum htt_rx_mpdu_encrypt_type enctype )
{
struct ieee80211_hdr * hdr ;
struct ethhdr * eth ;
size_t hdr_len ;
void * rfc1042 ;
u8 da [ ETH_ALEN ] ;
u8 sa [ ETH_ALEN ] ;
/* Delivered decapped frame:
* [ eth header ] < - - replaced with 802.11 hdr & rfc1042 / llc
* [ payload ]
*/
rfc1042 = ath10k_htt_rx_h_find_rfc1042 ( ar , msdu , enctype ) ;
if ( WARN_ON_ONCE ( ! rfc1042 ) )
return ;
/* pull decapped header and copy SA & DA */
eth = ( struct ethhdr * ) msdu - > data ;
ether_addr_copy ( da , eth - > h_dest ) ;
ether_addr_copy ( sa , eth - > h_source ) ;
skb_pull ( msdu , sizeof ( struct ethhdr ) ) ;
/* push rfc1042/llc/snap */
memcpy ( skb_push ( msdu , sizeof ( struct rfc1042_hdr ) ) , rfc1042 ,
sizeof ( struct rfc1042_hdr ) ) ;
/* push original 802.11 header */
hdr = ( struct ieee80211_hdr * ) first_hdr ;
hdr_len = ieee80211_hdrlen ( hdr - > frame_control ) ;
memcpy ( skb_push ( msdu , hdr_len ) , hdr , hdr_len ) ;
/* original 802.11 header has a different DA and in
* case of 4 addr it may also have different SA
*/
hdr = ( struct ieee80211_hdr * ) msdu - > data ;
ether_addr_copy ( ieee80211_get_DA ( hdr ) , da ) ;
ether_addr_copy ( ieee80211_get_SA ( hdr ) , sa ) ;
}
static void ath10k_htt_rx_h_undecap_snap ( struct ath10k * ar ,
struct sk_buff * msdu ,
struct ieee80211_rx_status * status ,
const u8 first_hdr [ 64 ] )
{
struct ieee80211_hdr * hdr ;
size_t hdr_len ;
/* Delivered decapped frame:
* [ amsdu header ] < - - replaced with 802.11 hdr
* [ rfc1042 / llc ]
* [ payload ]
*/
skb_pull ( msdu , sizeof ( struct amsdu_subframe_hdr ) ) ;
hdr = ( struct ieee80211_hdr * ) first_hdr ;
hdr_len = ieee80211_hdrlen ( hdr - > frame_control ) ;
memcpy ( skb_push ( msdu , hdr_len ) , hdr , hdr_len ) ;
}
static void ath10k_htt_rx_h_undecap ( struct ath10k * ar ,
struct sk_buff * msdu ,
struct ieee80211_rx_status * status ,
u8 first_hdr [ 64 ] ,
enum htt_rx_mpdu_encrypt_type enctype ,
bool is_decrypted )
{
struct htt_rx_desc * rxd ;
enum rx_msdu_decap_format decap ;
/* First msdu's decapped header:
* [ 802.11 header ] < - - padded to 4 bytes long
* [ crypto param ] < - - padded to 4 bytes long
* [ amsdu header ] < - - only if A - MSDU
* [ rfc1042 / llc ]
*
* Other ( 2 nd , 3 rd , . . ) msdu ' s decapped header :
* [ amsdu header ] < - - only if A - MSDU
* [ rfc1042 / llc ]
*/
rxd = ( void * ) msdu - > data - sizeof ( * rxd ) ;
decap = MS ( __le32_to_cpu ( rxd - > msdu_start . common . info1 ) ,
RX_MSDU_START_INFO1_DECAP_FORMAT ) ;
switch ( decap ) {
case RX_MSDU_DECAP_RAW :
ath10k_htt_rx_h_undecap_raw ( ar , msdu , status , enctype ,
is_decrypted ) ;
break ;
case RX_MSDU_DECAP_NATIVE_WIFI :
ath10k_htt_rx_h_undecap_nwifi ( ar , msdu , status , first_hdr ) ;
break ;
case RX_MSDU_DECAP_ETHERNET2_DIX :
ath10k_htt_rx_h_undecap_eth ( ar , msdu , status , first_hdr , enctype ) ;
break ;
case RX_MSDU_DECAP_8023_SNAP_LLC :
ath10k_htt_rx_h_undecap_snap ( ar , msdu , status , first_hdr ) ;
break ;
}
}
static int ath10k_htt_rx_get_csum_state ( struct sk_buff * skb )
{
struct htt_rx_desc * rxd ;
u32 flags , info ;
bool is_ip4 , is_ip6 ;
bool is_tcp , is_udp ;
bool ip_csum_ok , tcpudp_csum_ok ;
rxd = ( void * ) skb - > data - sizeof ( * rxd ) ;
flags = __le32_to_cpu ( rxd - > attention . flags ) ;
info = __le32_to_cpu ( rxd - > msdu_start . common . info1 ) ;
is_ip4 = ! ! ( info & RX_MSDU_START_INFO1_IPV4_PROTO ) ;
is_ip6 = ! ! ( info & RX_MSDU_START_INFO1_IPV6_PROTO ) ;
is_tcp = ! ! ( info & RX_MSDU_START_INFO1_TCP_PROTO ) ;
is_udp = ! ! ( info & RX_MSDU_START_INFO1_UDP_PROTO ) ;
ip_csum_ok = ! ( flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL ) ;
tcpudp_csum_ok = ! ( flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL ) ;
if ( ! is_ip4 & & ! is_ip6 )
return CHECKSUM_NONE ;
if ( ! is_tcp & & ! is_udp )
return CHECKSUM_NONE ;
if ( ! ip_csum_ok )
return CHECKSUM_NONE ;
if ( ! tcpudp_csum_ok )
return CHECKSUM_NONE ;
return CHECKSUM_UNNECESSARY ;
}
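
/* The result above feeds skb->ip_summed via ath10k_htt_rx_h_csum_offload():
 * CHECKSUM_UNNECESSARY is reported only when the hardware classified the
 * frame as IPv4/IPv6 TCP or UDP and both the IP header and TCP/UDP checksums
 * verified; in every other case the stack re-validates the checksums itself.
 */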
static void ath10k_htt_rx_h_csum_offload ( struct sk_buff * msdu )
{
msdu - > ip_summed = ath10k_htt_rx_get_csum_state ( msdu ) ;
}
static void ath10k_htt_rx_h_mpdu ( struct ath10k * ar ,
struct sk_buff_head * amsdu ,
struct ieee80211_rx_status * status )
{
struct sk_buff * first ;
struct sk_buff * last ;
struct sk_buff * msdu ;
struct htt_rx_desc * rxd ;
struct ieee80211_hdr * hdr ;
enum htt_rx_mpdu_encrypt_type enctype ;
u8 first_hdr [ 64 ] ;
u8 * qos ;
size_t hdr_len ;
bool has_fcs_err ;
bool has_crypto_err ;
bool has_tkip_err ;
bool has_peer_idx_invalid ;
bool is_decrypted ;
u32 attention ;
if ( skb_queue_empty ( amsdu ) )
return ;
first = skb_peek ( amsdu ) ;
rxd = ( void * ) first - > data - sizeof ( * rxd ) ;
enctype = MS ( __le32_to_cpu ( rxd - > mpdu_start . info0 ) ,
RX_MPDU_START_INFO0_ENCRYPT_TYPE ) ;
/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
* decapped header . It ' ll be used for undecapping of each MSDU .
*/
hdr = ( void * ) rxd - > rx_hdr_status ;
hdr_len = ieee80211_hdrlen ( hdr - > frame_control ) ;
memcpy ( first_hdr , hdr , hdr_len ) ;
/* Each A-MSDU subframe will use the original header as the base and be
* reported as a separate MSDU so strip the A - MSDU bit from QoS Ctl .
*/
hdr = ( void * ) first_hdr ;
qos = ieee80211_get_qos_ctl ( hdr ) ;
qos [ 0 ] & = ~ IEEE80211_QOS_CTL_A_MSDU_PRESENT ;
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail ( amsdu ) ;
rxd = ( void * ) last - > data - sizeof ( * rxd ) ;
attention = __le32_to_cpu ( rxd - > attention . flags ) ;
has_fcs_err = ! ! ( attention & RX_ATTENTION_FLAGS_FCS_ERR ) ;
has_crypto_err = ! ! ( attention & RX_ATTENTION_FLAGS_DECRYPT_ERR ) ;
has_tkip_err = ! ! ( attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR ) ;
has_peer_idx_invalid = ! ! ( attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID ) ;
/* Note: If hardware captures an encrypted frame that it can't decrypt,
* e . g . due to fcs error , missing peer or invalid key data it will
* report the frame as raw .
*/
is_decrypted = ( enctype ! = HTT_RX_MPDU_ENCRYPT_NONE & &
! has_fcs_err & &
! has_crypto_err & &
! has_peer_idx_invalid ) ;
/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
status - > flag & = ~ ( RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED ) ;
if ( has_fcs_err )
status - > flag | = RX_FLAG_FAILED_FCS_CRC ;
if ( has_tkip_err )
status - > flag | = RX_FLAG_MMIC_ERROR ;
if ( is_decrypted )
status - > flag | = RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED ;
skb_queue_walk ( amsdu , msdu ) {
ath10k_htt_rx_h_csum_offload ( msdu ) ;
ath10k_htt_rx_h_undecap ( ar , msdu , status , first_hdr , enctype ,
is_decrypted ) ;
/* Undecapping involves copying the original 802.11 header back
* to sk_buff . If frame is protected and hardware has decrypted
* it then remove the protected bit .
*/
if ( ! is_decrypted )
continue ;
hdr = ( void * ) msdu - > data ;
hdr - > frame_control & = ~ __cpu_to_le16 ( IEEE80211_FCTL_PROTECTED ) ;
}
}
static void ath10k_htt_rx_h_deliver ( struct ath10k * ar ,
struct sk_buff_head * amsdu ,
struct ieee80211_rx_status * status )
{
struct sk_buff * msdu ;
while ( ( msdu = __skb_dequeue ( amsdu ) ) ) {
/* Setup per-MSDU flags */
if ( skb_queue_empty ( amsdu ) )
status - > flag & = ~ RX_FLAG_AMSDU_MORE ;
else
status - > flag | = RX_FLAG_AMSDU_MORE ;
ath10k_process_rx ( ar , status , msdu ) ;
}
}
static int ath10k_unchain_msdu ( struct sk_buff_head * amsdu )
{
struct sk_buff * skb , * first ;
int space ;
int total_len = 0 ;
/* TODO: Might could optimize this by using
* skb_try_coalesce or similar method to
* decrease copying , or maybe get mac80211 to
* provide a way to just receive a list of
* skb ?
*/
first = __skb_dequeue ( amsdu ) ;
/* Allocate total length all at once. */
skb_queue_walk ( amsdu , skb )
total_len + = skb - > len ;
space = total_len - skb_tailroom ( first ) ;
if ( ( space > 0 ) & &
( pskb_expand_head ( first , 0 , space , GFP_ATOMIC ) < 0 ) ) {
/* TODO: bump some rx-oom error stat */
/* put it back together so we can free the
* whole list at once .
*/
__skb_queue_head ( amsdu , first ) ;
return - 1 ;
}
/* Walk list again, copying contents into
* msdu_head
*/
while ( ( skb = __skb_dequeue ( amsdu ) ) ) {
skb_copy_from_linear_data ( skb , skb_put ( first , skb - > len ) ,
skb - > len ) ;
dev_kfree_skb_any ( skb ) ;
}
__skb_queue_head ( amsdu , first ) ;
return 0 ;
}
static void ath10k_htt_rx_h_unchain ( struct ath10k * ar ,
struct sk_buff_head * amsdu ,
bool chained )
{
struct sk_buff * first ;
struct htt_rx_desc * rxd ;
enum rx_msdu_decap_format decap ;
first = skb_peek ( amsdu ) ;
rxd = ( void * ) first - > data - sizeof ( * rxd ) ;
decap = MS ( __le32_to_cpu ( rxd - > msdu_start . common . info1 ) ,
RX_MSDU_START_INFO1_DECAP_FORMAT ) ;
if ( ! chained )
return ;
/* FIXME: Current unchaining logic can only handle simple case of raw
* msdu chaining . If decapping is other than raw the chaining may be
* more complex and this isn ' t handled by the current code . Don ' t even
* try re - constructing such frames - it ' ll be pretty much garbage .
*/
if ( decap ! = RX_MSDU_DECAP_RAW | |
skb_queue_len ( amsdu ) ! = 1 + rxd - > frag_info . ring2_more_count ) {
__skb_queue_purge ( amsdu ) ;
return ;
}
ath10k_unchain_msdu ( amsdu ) ;
}
static bool ath10k_htt_rx_amsdu_allowed ( struct ath10k * ar ,
struct sk_buff_head * amsdu ,
struct ieee80211_rx_status * rx_status )
{
struct sk_buff * msdu ;
struct htt_rx_desc * rxd ;
bool is_mgmt ;
bool has_fcs_err ;
msdu = skb_peek ( amsdu ) ;
rxd = ( void * ) msdu - > data - sizeof ( * rxd ) ;
/* FIXME: It might be a good idea to do some fuzzy-testing to drop
* invalid / dangerous frames .
*/
if ( ! rx_status - > freq ) {
ath10k_warn ( ar , " no channel configured; ignoring frame(s)! \n " ) ;
return false ;
}
is_mgmt = ! ! ( rxd - > attention . flags &
__cpu_to_le32 ( RX_ATTENTION_FLAGS_MGMT_TYPE ) ) ;
has_fcs_err = ! ! ( rxd - > attention . flags &
__cpu_to_le32 ( RX_ATTENTION_FLAGS_FCS_ERR ) ) ;
2014-11-18 09:24:48 +02:00
/* Management frames are handled via WMI events. The pros of such
* approach is that channel is explicitly provided in WMI events
* whereas HTT doesn ' t provide channel information for Rxed frames .
2014-11-24 15:34:08 +01:00
*
* However some firmware revisions don ' t report corrupted frames via
* WMI so don ' t drop them .
2014-11-18 09:24:48 +02:00
*/
2014-11-24 15:34:08 +01:00
if ( is_mgmt & & ! has_fcs_err ) {
2014-08-25 12:09:38 +02:00
ath10k_dbg ( ar , ATH10K_DBG_HTT , " htt rx mgmt ctrl \n " ) ;
2014-03-19 07:09:40 +01:00
return false ;
}
2014-11-18 09:24:48 +02:00
if ( test_bit ( ATH10K_CAC_RUNNING , & ar - > dev_flags ) ) {
ath10k_dbg ( ar , ATH10K_DBG_HTT , " htt rx cac running \n " ) ;
2014-03-19 07:09:40 +01:00
return false ;
}
return true ;
}
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}

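/* Handle an HTT rx indication: pop each advertised MPDU off the rx ring as
 * an A-MSDU and run it through the ppdu, unchain, filter, mpdu and deliver
 * stages before scheduling a ring replenish.
 */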
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct sk_buff_head amsdu;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, ret, mpdu_count = 0;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	while (mpdu_count--) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
					      &fw_desc_len, &amsdu);
		if (ret < 0) {
			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
			__skb_queue_purge(&amsdu);
			/* FIXME: It's probably a good idea to reboot the
			 * device instead of leaving it inoperable.
			 */
			htt->rx_confused = true;
			break;
		}

		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

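/* Handle an HTT rx fragment indication. Exactly one MSDU is expected per
 * indication; it goes through the same ppdu/filter/mpdu/deliver path as
 * regular rx, minus the unchain stage.
 */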
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	u8 *fw_desc;
	int fw_desc_len;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	tasklet_schedule(&htt->rx_replenish_task);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		__skb_queue_purge(&amsdu);
		return;
	}

	if (skb_queue_len(&amsdu) != 1) {
		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
		__skb_queue_purge(&amsdu);
		return;
	}

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);

	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

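/* Process an HTT data tx completion: translate the firmware status into a
 * struct htt_tx_done and release every MSDU id reported in the message.
 */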
static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.success = true;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

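/* An rx ADDBA event from firmware: look up the peer and its vif and ask
 * mac80211 to start an offloaded rx BA session for the given TID.
 */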
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

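/* Counterpart of the ADDBA handler: stop the offloaded rx BA session for
 * the reported peer and TID.
 */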
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

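/* Split the next A-MSDU off the front of @list into @amsdu, using the
 * LAST_MSDU bit in the rx descriptor as the delimiter. Returns -EAGAIN
 * (and splices the buffers back onto @list) if the A-MSDU is incomplete.
 */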
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
					struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

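/* Deliver offloaded MSDUs. These carry a short htt_rx_offload_msdu header
 * instead of a full rx descriptor, so the payload is re-aligned, a minimal
 * rx status is synthesized and the frame is handed to mac80211 directly.
 */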
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_process_rx(ar, status, msdu);
	}
}

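/* Handle an in-order rx indication: pop the buffers referenced by the
 * indication off the ring, deliver offloaded frames separately, then
 * extract and process each contained A-MSDU.
 */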
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
		     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return;
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

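/* Top-level HTT T2H dispatcher. Rx, tx completion and in-order rx
 * indications are queued and handled later from the txrx tasklet (the skb
 * is handed over, so the handler returns without freeing it); everything
 * else is processed inline and the buffer is freed before returning.
 */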
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		dev_kfree_skb_any(skb);
		return;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.success = true;
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		ath10k_txrx_tx_unref(htt, &tx_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		skb_queue_tail(&htt->tx_compl_q, skb);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		struct ath10k_pktlog_hdr *hdr =
			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					sizeof(*hdr) +
					__le16_to_cpu(hdr->size));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
		break;
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
	case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}

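/* Tasklet that drains the deferred tx completion, rx indication and
 * in-order rx indication queues filled by ath10k_htt_t2h_msg_handler().
 */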
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct ath10k *ar = htt->ar;
	struct htt_resp *resp;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}

	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
		ath10k_htt_rx_in_ord_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}