/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"

#include <linux/log2.h>
/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
        int size;

        /*
         * It is expected that the host CPU will typically be able to
         * service the rx indication from one A-MPDU before the rx
         * indication from the subsequent A-MPDU happens, roughly 1-2 ms
         * later. However, the rx ring should be sized very conservatively,
         * to accommodate the worst reasonable delay before the host CPU
         * services a rx indication interrupt.
         *
         * The rx ring need not be kept full of empty buffers. In theory,
         * the htt host SW can dynamically track the low-water mark in the
         * rx ring, and dynamically adjust the level to which the rx ring
         * is filled with empty buffers, to dynamically meet the desired
         * low-water mark.
         *
         * In contrast, it's difficult to resize the rx ring itself, once
         * it's in use. Thus, the ring itself should be sized very
         * conservatively, while the degree to which the ring is filled
         * with empty buffers should be sized moderately conservatively.
         */

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size = htt->max_throughput_mbps *
               1000 /
               (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

        if (size < HTT_RX_RING_SIZE_MIN)
                size = HTT_RX_RING_SIZE_MIN;

        if (size > HTT_RX_RING_SIZE_MAX)
                size = HTT_RX_RING_SIZE_MAX;

        size = roundup_pow_of_two(size);

        return size;
}
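
/* A worked example of the sizing math above (illustrative numbers, not
 * from the original source): assuming max_throughput_mbps = 800, the
 * formula yields 800 * 1000 / (8 * 1000) * 20 = 2000 entries, which is
 * within the [128, 2048] clamp and rounds up to the next power of two,
 * i.e. 2048.
 */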
static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
        int size;

        /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
        size = htt->max_throughput_mbps *
               1000 /
               (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

        /*
         * Make sure the fill level is at least 1 less than the ring size.
         * Leaving 1 element empty allows the SW to easily distinguish
         * between a full ring vs. an empty ring.
         */
        if (size >= htt->rx_ring.size)
                size = htt->rx_ring.size - 1;

        return size;
}
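
/* Illustrative arithmetic for the fill level (assumed throughput, not
 * from the original source): with max_throughput_mbps = 800 this gives
 * 800 * 1000 / (8 * 1000) * 10 = 1000 buffers. Capping the level at
 * size - 1 keeps "alloc index == read index" unambiguous: it can only
 * mean an empty ring, never a full one.
 */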
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct ath10k_skb_cb *cb;
        int i;

        for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
                skb = htt->rx_ring.netbufs_ring[i];
                cb = ATH10K_SKB_CB(skb);
                dma_unmap_single(htt->ar->dev, cb->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }

        htt->rx_ring.fill_cnt = 0;
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        struct htt_rx_desc *rx_desc;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0, idx;

        idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
        while (num > 0) {
                skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
                if (!skb) {
                        ret = -ENOMEM;
                        goto fail;
                }

                if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
                        skb_pull(skb,
                                 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
                                 skb->data);

                /* Clear rx_desc attention word before posting to Rx ring */
                rx_desc = (struct htt_rx_desc *)skb->data;
                rx_desc->attention.flags = __cpu_to_le32(0);

                paddr = dma_map_single(htt->ar->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
                        dev_kfree_skb_any(skb);
                        ret = -ENOMEM;
                        goto fail;
                }

                ATH10K_SKB_CB(skb)->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
                htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
                htt->rx_ring.fill_cnt++;

                num--;
                idx++;
                idx &= htt->rx_ring.size_mask;
        }

fail:
        *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
        return ret;
}
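
/* Note on the index update above: because the ring size is a power of
 * two, "idx &= size_mask" is a branch-free modulo. For a hypothetical
 * ring of size 8 (size_mask = 7) the index sequence wraps as
 * 6 -> 7 -> 0 -> 1, with no division or conditional needed.
 */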
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
        lockdep_assert_held(&htt->rx_ring.lock);
        return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
        int ret, num_deficit, num_to_fill;

        /* Refilling the whole RX ring buffer proves to be a bad idea. The
         * reason is RX may take up a significant amount of CPU cycles and
         * starve other tasks, e.g. TX on an ethernet device while acting
         * as a bridge with the ath10k wlan interface. This ended up with
         * very poor performance once the host system's CPU was overwhelmed
         * with RX on ath10k.
         *
         * By limiting the number of refills the replenishing occurs
         * progressively. This in turn makes use of the fact that tasklets
         * are processed in FIFO order. This means actual RX processing
         * can starve out refilling. If there are not enough buffers in
         * the RX ring the FW will not report RX until the ring is
         * refilled with enough buffers. This automatically balances load
         * wrt to CPU power.
         *
         * This probably comes at a cost of lower maximum throughput but
         * improves the average and stability. */
        spin_lock_bh(&htt->rx_ring.lock);
        num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
        num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
        num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
                 * Failed to fill it to the desired level -
                 * we'll start a timer and try again next time.
                 * As long as enough buffers are left in the ring for
                 * another A-MPDU rx, no special recovery is needed.
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
        } else if (num_deficit > 0) {
                tasklet_schedule(&htt->rx_replenish_task);
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}
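
/* Sketch of the progressive refill above, with assumed numbers for
 * illustration only: if fill_level = 1000, fill_cnt = 100 and
 * ATH10K_HTT_MAX_NUM_REFILL were 100, the deficit of 900 would be
 * refilled in batches of 100, each batch re-scheduling
 * rx_replenish_task so that queued RX processing can run in between.
 */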
static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)arg;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}
static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        int i;

        for (i = 0; i < htt->rx_ring.size; i++) {
                skb = htt->rx_ring.netbufs_ring[i];
                if (!skb)
                        continue;

                dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                htt->rx_ring.netbufs_ring[i] = NULL;
        }
}
void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
        tasklet_kill(&htt->rx_replenish_task);
        tasklet_kill(&htt->txrx_compl_task);

        skb_queue_purge(&htt->tx_compl_q);
        skb_queue_purge(&htt->rx_compl_q);

        ath10k_htt_rx_ring_clean_up(htt);

        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);

        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);

        kfree(htt->rx_ring.netbufs_ring);
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
        int idx;
        struct sk_buff *msdu;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_ring.fill_cnt == 0) {
                ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
                return NULL;
        }

        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;

        idx++;
        idx &= htt->rx_ring.size_mask;
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;

        return msdu;
}
static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
        struct sk_buff *next;

        while (skb) {
                next = skb->next;
                dev_kfree_skb_any(skb);
                skb = next;
        }
}
/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   u8 **fw_desc, int *fw_desc_len,
                                   struct sk_buff **head_msdu,
                                   struct sk_buff **tail_msdu)
{
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;

        lockdep_assert_held(&htt->rx_ring.lock);

        if (htt->rx_confused) {
                ath10k_warn("htt is confused. refusing rx\n");
                return -1;
        }

        msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
        while (msdu) {
                int last_msdu, msdu_len_invalid, msdu_chained;

                dma_unmap_single(htt->ar->dev,
                                 ATH10K_SKB_CB(msdu)->paddr,
                                 msdu->len + skb_tailroom(msdu),
                                 DMA_FROM_DEVICE);

                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
                                msdu->data, msdu->len + skb_tailroom(msdu));

                rx_desc = (struct htt_rx_desc *)msdu->data;

                /* FIXME: we must report msdu payload since this is what caller
                 * expects now */
                skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
                skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

                /*
                 * Sanity check - confirm the HW is finished filling in the
                 * rx data.
                 * If the HW and SW are working correctly, then it's guaranteed
                 * that the HW's MAC DMA is done before this point in the SW.
                 * To prevent the case that we handle a stale Rx descriptor,
                 * just assert for now until we have a way to recover.
                 */
                if (!(__le32_to_cpu(rx_desc->attention.flags)
                                & RX_ATTENTION_FLAGS_MSDU_DONE)) {
                        ath10k_htt_rx_free_msdu_chain(*head_msdu);
                        *head_msdu = NULL;
                        msdu = NULL;
                        ath10k_err("htt rx stopped. cannot recover\n");
                        htt->rx_confused = true;
                        break;
                }

                /*
                 * Copy the FW rx descriptor for this MSDU from the rx
                 * indication message into the MSDU's netbuf. HL uses the
                 * same rx indication message definition as LL, and simply
                 * appends new info (fields from the HW rx desc, and the
                 * MSDU payload itself). So, the offset into the rx
                 * indication message only has to account for the standard
                 * offset of the per-MSDU FW rx desc info within the
                 * message, and how many bytes of the per-MSDU FW rx desc
                 * info have already been consumed. (And the endianness of
                 * the host, since for a big-endian host, the rx ind
                 * message contents, including the per-MSDU rx desc bytes,
                 * were byteswapped during upload.)
                 */
                if (*fw_desc_len > 0) {
                        rx_desc->fw_desc.info0 = **fw_desc;
                        /*
                         * The target is expected to only provide the basic
                         * per-MSDU rx descriptors. Just to be sure, verify
                         * that the target has not attached extension data
                         * (e.g. LRO flow ID).
                         */

                        /* or more, if there's extension data */
                        (*fw_desc)++;
                        (*fw_desc_len)--;
                } else {
                        /*
                         * When an oversized A-MSDU happens, the FW will lose
                         * some of the MSDU status - in this case, the FW
                         * descriptors provided will be fewer than the
                         * actual MSDUs inside this MPDU. Mark the FW
                         * descriptors so that the frames will still be
                         * delivered to the upper stack, if there is no CRC
                         * error for this MPDU.
                         *
                         * FIX THIS - the FW descriptors are actually for
                         * MSDUs at the end of this A-MSDU instead of the
                         * beginning.
                         */
                        rx_desc->fw_desc.info0 = 0;
                }

                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;
                msdu_chaining = msdu_chained;

                if (msdu_len_invalid)
                        msdu_len = 0;

                skb_trim(msdu, 0);
                skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
                msdu_len -= msdu->len;

                /* FIXME: Do chained buffers include htt_rx_desc or not? */
                while (msdu_chained--) {
                        struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

                        dma_unmap_single(htt->ar->dev,
                                         ATH10K_SKB_CB(next)->paddr,
                                         next->len + skb_tailroom(next),
                                         DMA_FROM_DEVICE);

                        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
                                        "htt rx chained: ", next->data,
                                        next->len + skb_tailroom(next));

                        skb_trim(next, 0);
                        skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
                        msdu_len -= next->len;

                        msdu->next = next;
                        msdu = next;
                }

                last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;

                if (last_msdu) {
                        msdu->next = NULL;
                        break;
                } else {
                        struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
                        msdu->next = next;
                        msdu = next;
                }
        }
        *tail_msdu = msdu;

        if (*head_msdu == NULL)
                msdu_chaining = -1;

        /*
         * Don't refill the ring yet.
         *
         * First, the elements popped here are still in use - it is not
         * safe to overwrite them until the matching call to
         * mpdu_desc_list_next. Second, for efficiency it is preferable to
         * refill the rx ring with 1 PPDU's worth of rx buffers (something
         * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
         * (something like 3 buffers). Consequently, we'll rely on the txrx
         * SW to tell us when it is done pulling all the PPDU's rx buffers
         * out of the rx ring, and then refill it just once.
         */

        return msdu_chaining;
}
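
/* A minimal caller-side sketch (hypothetical, but mirroring how the rx
 * handler below consumes the return value):
 *
 *      ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
 *                                    &msdu_head, &msdu_tail);
 *      if (ret < 0)                    // fatal, drop the chain
 *              ath10k_htt_rx_free_msdu_chain(msdu_head);
 *      else if (ret > 0)               // chained msdus, coalesce first
 *              ath10k_unchain_msdu(msdu_head);
 *      // ret == 0: a single, non-chained msdu
 */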
static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

        ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
        dma_addr_t paddr;
        void *vaddr;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

        htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn("htt rx ring size is not power of 2\n");
                return -EINVAL;
        }

        htt->rx_ring.size_mask = htt->rx_ring.size - 1;

        /*
         * Set the initial value for the level to which the rx ring
         * should be filled, based on the max throughput and the
         * worst likely latency for the host to fill the rx ring
         * with new buffers. In theory, this fill level can be
         * dynamically adjusted from the initial value set here, to
         * reflect the actual host latency rather than a
         * conservative assumption about the host latency.
         */
        htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

        htt->rx_ring.netbufs_ring =
                kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
                        GFP_KERNEL);
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   (htt->rx_ring.size *
                                    sizeof(htt->rx_ring.paddrs_ring)),
                                   &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_ring;

        htt->rx_ring.paddrs_ring = vaddr;
        htt->rx_ring.base_paddr = paddr;

        vaddr = dma_alloc_coherent(htt->ar->dev,
                                   sizeof(*htt->rx_ring.alloc_idx.vaddr),
                                   &paddr, GFP_DMA);
        if (!vaddr)
                goto err_dma_idx;

        htt->rx_ring.alloc_idx.vaddr = vaddr;
        htt->rx_ring.alloc_idx.paddr = paddr;
        htt->rx_ring.sw_rd_idx.msdu_payld = 0;
        *htt->rx_ring.alloc_idx.vaddr = 0;

        /* Initialize the Rx refill retry timer */
        setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

        spin_lock_init(&htt->rx_ring.lock);

        htt->rx_ring.fill_cnt = 0;
        if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
                goto err_fill_ring;

        tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
                     (unsigned long)htt);

        skb_queue_head_init(&htt->tx_compl_q);
        skb_queue_head_init(&htt->rx_compl_q);

        tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
                     (unsigned long)htt);

        ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;

err_fill_ring:
        ath10k_htt_rx_ring_free(htt);
        dma_free_coherent(htt->ar->dev,
                          sizeof(*htt->rx_ring.alloc_idx.vaddr),
                          htt->rx_ring.alloc_idx.vaddr,
                          htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
        dma_free_coherent(htt->ar->dev,
                          (htt->rx_ring.size *
                           sizeof(htt->rx_ring.paddrs_ring)),
                          htt->rx_ring.paddrs_ring,
                          htt->rx_ring.base_paddr);
err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
        return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
                return 4;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
        case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return 8;
        case HTT_RX_MPDU_ENCRYPT_NONE:
                return 0;
        }

        ath10k_warn("unknown encryption type %d\n", type);
        return 0;
}
static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
        switch (type) {
        case HTT_RX_MPDU_ENCRYPT_NONE:
        case HTT_RX_MPDU_ENCRYPT_WEP40:
        case HTT_RX_MPDU_ENCRYPT_WEP104:
        case HTT_RX_MPDU_ENCRYPT_WEP128:
        case HTT_RX_MPDU_ENCRYPT_WAPI:
                return 0;
        case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
        case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
                return 4;
        case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
                return 8;
        }

        ath10k_warn("unknown encryption type %d\n", type);
        return 0;
}
/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                 RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (fmt == RX_MSDU_DECAP_RAW)
                return (void *)skb->data;
        else
                return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}
/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                if (qc[0] & 0x80)
                        return true;
        }
        return false;
}
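
/* The qc[0] & 0x80 test above checks the "A-MSDU present" bit (bit 7 of
 * the first QoS control octet). For example, a QoS data frame with
 * qc[0] == 0x87 (TID 7, A-MSDU bit set) is treated as an A-MSDU, while
 * one with qc[0] == 0x07 is not.
 */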
struct rfc1042_hdr {
        u8 llc_dsap;
        u8 llc_ssap;
        u8 llc_ctrl;
        u8 snap_oui[3];
        __be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
} __packed;
static const u8 rx_legacy_rate_idx[] = {
        3,      /* 0x00  - 11Mbps  */
        2,      /* 0x01  - 5.5Mbps */
        1,      /* 0x02  - 2Mbps   */
        0,      /* 0x03  - 1Mbps   */
        3,      /* 0x04  - 11Mbps  */
        2,      /* 0x05  - 5.5Mbps */
        1,      /* 0x06  - 2Mbps   */
        0,      /* 0x07  - 1Mbps   */
        10,     /* 0x08  - 48Mbps  */
        8,      /* 0x09  - 24Mbps  */
        6,      /* 0x0A  - 12Mbps  */
        4,      /* 0x0B  - 6Mbps   */
        11,     /* 0x0C  - 54Mbps  */
        9,      /* 0x0D  - 36Mbps  */
        7,      /* 0x0E  - 18Mbps  */
        5,      /* 0x0F  - 9Mbps   */
};
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  enum ieee80211_band band,
                                  u8 info0, u32 info1, u32 info2,
                                  struct ieee80211_rx_status *status)
{
        u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
        u8 preamble = 0;

        /* Check if valid fields */
        if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
                return;

        preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);

        switch (preamble) {
        case HTT_RX_LEGACY:
                cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
                rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
                rate_idx = 0;

                if (rate < 0x08 || rate > 0x0F)
                        break;

                switch (band) {
                case IEEE80211_BAND_2GHZ:
                        if (cck)
                                rate &= ~BIT(3);
                        rate_idx = rx_legacy_rate_idx[rate];
                        break;
                case IEEE80211_BAND_5GHZ:
                        rate_idx = rx_legacy_rate_idx[rate];
                        /* We use the same rate table that we registered
                           with the HW - ath10k_rates[]. For 5 GHz the CCK
                           rates are skipped, hence the -4 here */
                        rate_idx -= 4;
                        break;
                default:
                        break;
                }

                status->rate_idx = rate_idx;
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
                /* HT-SIG - Table 20-11 in info1 and info2 */
                mcs = info1 & 0x1F;
                nss = mcs >> 3;
                bw = (info1 >> 7) & 1;
                sgi = (info2 >> 7) & 1;

                status->rate_idx = mcs;
                status->flag |= RX_FLAG_HT;
                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;
                if (bw)
                        status->flag |= RX_FLAG_40MHZ;
                break;
        case HTT_RX_VHT:
        case HTT_RX_VHT_WITH_TXBF:
                /* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2
                   TODO check this */
                mcs = (info2 >> 4) & 0x0F;
                nss = ((info1 >> 10) & 0x07) + 1;
                bw = info1 & 3;
                sgi = info2 & 1;

                status->rate_idx = mcs;
                status->vht_nss = nss;

                if (sgi)
                        status->flag |= RX_FLAG_SHORT_GI;

                switch (bw) {
                /* 20MHZ */
                case 0:
                        break;
                /* 40MHZ */
                case 1:
                        status->flag |= RX_FLAG_40MHZ;
                        break;
                /* 80MHZ */
                case 2:
                        status->vht_flag |= RX_VHT_FLAG_80MHZ;
                }

                status->flag |= RX_FLAG_VHT;
                break;
        default:
                break;
        }
}
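
/* Worked HT decode example (assumed PPDU field values, for illustration
 * only): with info1 = 0x8f the HT branch above extracts
 * mcs = 0x8f & 0x1f = 15 and bw = (0x8f >> 7) & 1 = 1, so the frame is
 * reported as MCS 15 with RX_FLAG_HT | RX_FLAG_40MHZ, plus
 * RX_FLAG_SHORT_GI if bit 7 of info2 is also set.
 */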
static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
                                      struct ieee80211_rx_status *rx_status,
                                      struct sk_buff *skb,
                                      enum htt_rx_mpdu_encrypt_type enctype)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) {
                rx_status->flag &= ~(RX_FLAG_DECRYPTED |
                                     RX_FLAG_IV_STRIPPED |
                                     RX_FLAG_MMIC_STRIPPED);
                return;
        }

        rx_status->flag |= RX_FLAG_DECRYPTED |
                           RX_FLAG_IV_STRIPPED |
                           RX_FLAG_MMIC_STRIPPED;
        hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
                                           ~IEEE80211_FCTL_PROTECTED);
}
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
                                    struct ieee80211_rx_status *status)
{
        struct ieee80211_channel *ch;

        spin_lock_bh(&ar->data_lock);
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
        spin_unlock_bh(&ar->data_lock);

        if (!ch)
                return false;

        status->band = ch->band;
        status->freq = ch->center_freq;

        return true;
}
static void ath10k_process_rx(struct ath10k *ar,
                              struct ieee80211_rx_status *rx_status,
                              struct sk_buff *skb)
{
        struct ieee80211_rx_status *status;

        status = IEEE80211_SKB_RXCB(skb);
        *status = *rx_status;

        ath10k_dbg(ATH10K_DBG_DATA,
                   "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i\n",
                   skb,
                   skb->len,
                   status->flag == 0 ? "legacy" : "",
                   status->flag & RX_FLAG_HT ? "ht" : "",
                   status->flag & RX_FLAG_VHT ? "vht" : "",
                   status->flag & RX_FLAG_40MHZ ? "40" : "",
                   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
                   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->vht_nss,
                   status->freq,
                   status->band, status->flag,
                   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
                   !!(status->flag & RX_FLAG_MMIC_ERROR));
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
                        skb->data, skb->len);

        ieee80211_rx(ar->hw, skb);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
        /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
        return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}
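
/* Example of the rounding above: a 3-address QoS header is 26 bytes and
 * is padded to 28, while a 4-address (WDS) header without QoS is 30
 * bytes and is padded to 32; the latter case is presumably what the
 * "fixes 4addr rx" note refers to.
 */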
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
                                struct ieee80211_rx_status *rx_status,
                                struct sk_buff *skb_in)
{
        struct htt_rx_desc *rxd;
        struct sk_buff *skb = skb_in;
        struct sk_buff *first;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
        struct ieee80211_hdr *hdr;
        u8 hdr_buf[64], addr[ETH_ALEN], *qos;
        unsigned int hdr_len;

        rxd = (void *)skb->data - sizeof(*rxd);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

        hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
        memcpy(hdr_buf, hdr, hdr_len);
        hdr = (struct ieee80211_hdr *)hdr_buf;

        first = skb;
        while (skb) {
                void *decap_hdr;
                int len;

                rxd = (void *)skb->data - sizeof(*rxd);
                fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                         RX_MSDU_START_INFO1_DECAP_FORMAT);
                decap_hdr = (void *)rxd->rx_hdr_status;

                skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

                /* First frame in an A-MSDU chain has more decapped data. */
                if (skb == first) {
                        len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
                        len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
                                        4);
                        decap_hdr += len;
                }

                switch (fmt) {
                case RX_MSDU_DECAP_RAW:
                        /* remove trailing FCS */
                        skb_trim(skb, skb->len - FCS_LEN);
                        break;
                case RX_MSDU_DECAP_NATIVE_WIFI:
                        /* pull decapped header and copy DA */
                        hdr = (struct ieee80211_hdr *)skb->data;
                        hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
                        memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
                        skb_pull(skb, hdr_len);

                        /* push original 802.11 header */
                        hdr = (struct ieee80211_hdr *)hdr_buf;
                        hdr_len = ieee80211_hdrlen(hdr->frame_control);
                        memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

                        /* original A-MSDU header has the bit set but we're
                         * not including A-MSDU subframe header */
                        hdr = (struct ieee80211_hdr *)skb->data;
                        qos = ieee80211_get_qos_ctl(hdr);
                        qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

                        /* original 802.11 header has a different DA */
                        memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
                        break;
                case RX_MSDU_DECAP_ETHERNET2_DIX:
                        /* strip ethernet header and insert decapped 802.11
                         * header, amsdu subframe header and rfc1042 header */
                        len = 0;
                        len += sizeof(struct rfc1042_hdr);
                        len += sizeof(struct amsdu_subframe_hdr);

                        skb_pull(skb, sizeof(struct ethhdr));
                        memcpy(skb_push(skb, len), decap_hdr, len);
                        memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                        break;
                case RX_MSDU_DECAP_8023_SNAP_LLC:
                        /* insert decapped 802.11 header making a single
                         * A-MSDU */
                        memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                        break;
                }

                skb_in = skb;
                ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype);
                skb = skb->next;
                skb_in->next = NULL;

                if (skb)
                        rx_status->flag |= RX_FLAG_AMSDU_MORE;
                else
                        rx_status->flag &= ~RX_FLAG_AMSDU_MORE;

                ath10k_process_rx(htt->ar, rx_status, skb_in);
        }

        /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
         * monitor interface active for sniffing purposes. */
}
static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
                               struct ieee80211_rx_status *rx_status,
                               struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
        int hdr_len;
        void *rfc1042;

        /* This shouldn't happen. If it does then it may be a FW bug. */
        if (skb->next) {
                ath10k_warn("htt rx received chained non A-MSDU frame\n");
                ath10k_htt_rx_free_msdu_chain(skb->next);
                skb->next = NULL;
        }

        rxd = (void *)skb->data - sizeof(*rxd);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                 RX_MSDU_START_INFO1_DECAP_FORMAT);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
        hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);

        skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

        switch (fmt) {
        case RX_MSDU_DECAP_RAW:
                /* remove trailing FCS */
                skb_trim(skb, skb->len - FCS_LEN);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
                /* Pull decapped header */
                hdr = (struct ieee80211_hdr *)skb->data;
                hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
                skb_pull(skb, hdr_len);

                /* Push original header */
                hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
                hdr_len = ieee80211_hdrlen(hdr->frame_control);
                memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
                /* strip ethernet header and insert decapped 802.11 header and
                 * rfc1042 header */

                rfc1042 = hdr;
                rfc1042 += roundup(hdr_len, 4);
                rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

                skb_pull(skb, sizeof(struct ethhdr));
                memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
                       rfc1042, sizeof(struct rfc1042_hdr));
                memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                break;
        case RX_MSDU_DECAP_8023_SNAP_LLC:
                /* remove A-MSDU subframe header and insert
                 * decapped 802.11 header. rfc1042 header is already there */

                skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
                memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                break;
        }

        ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype);

        ath10k_process_rx(htt->ar, rx_status, skb);
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
        struct htt_rx_desc *rxd;
        u32 flags, info;
        bool is_ip4, is_ip6;
        bool is_tcp, is_udp;
        bool ip_csum_ok, tcpudp_csum_ok;

        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
        info = __le32_to_cpu(rxd->msdu_start.info1);

        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
        is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
        is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
        ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
        tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

        if (!is_ip4 && !is_ip6)
                return CHECKSUM_NONE;
        if (!is_tcp && !is_udp)
                return CHECKSUM_NONE;
        if (!ip_csum_ok)
                return CHECKSUM_NONE;
        if (!tcpudp_csum_ok)
                return CHECKSUM_NONE;

        return CHECKSUM_UNNECESSARY;
}
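
/* Decision summary for the function above (a restatement, not new
 * logic): CHECKSUM_UNNECESSARY is reported only for IPv4/IPv6 TCP or
 * UDP frames whose IP header checksum and TCP/UDP checksum both passed
 * in hardware; everything else falls back to CHECKSUM_NONE and is
 * verified by the network stack.
 */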
static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
{
        struct sk_buff *next = msdu_head->next;
        struct sk_buff *to_free = next;
        int space;
        int total_len = 0;

        /* TODO: We might be able to optimize this by using
         * skb_try_coalesce or a similar method to
         * decrease copying, or maybe get mac80211 to
         * provide a way to just receive a list of
         * skb?
         */

        msdu_head->next = NULL;

        /* Allocate total length all at once. */
        while (next) {
                total_len += next->len;
                next = next->next;
        }

        space = total_len - skb_tailroom(msdu_head);
        if ((space > 0) &&
            (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
                /* TODO: bump some rx-oom error stat */
                /* put it back together so we can free the
                 * whole list at once.
                 */
                msdu_head->next = to_free;
                return -1;
        }

        /* Walk list again, copying contents into
         * msdu_head
         */
        next = to_free;
        while (next) {
                skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
                                          next->len);
                next = next->next;
        }

        /* If we get here, we have a consolidated skb. Free the
         * fragments and pass the main skb on up the
         * stack.
         */
        ath10k_htt_rx_free_msdu_chain(to_free);
        return 0;
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
                                        struct sk_buff *head,
                                        enum htt_rx_mpdu_status status,
                                        bool channel_set,
                                        u32 attention)
{
        if (head->len == 0) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "htt rx dropping due to zero-len\n");
                return false;
        }

        if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "htt rx dropping due to decrypt-err\n");
                return false;
        }

        if (!channel_set) {
                ath10k_warn("no channel configured; ignoring frame!\n");
                return false;
        }

        /* Skip mgmt frames while we handle this in WMI */
        if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
            attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
                ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
                return false;
        }

        if (status != HTT_RX_IND_MPDU_STATUS_OK &&
            status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
            status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
            !htt->ar->monitor_started) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "htt rx ignoring frame w/ status %d\n",
                           status);
                return false;
        }

        if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "htt rx CAC running\n");
                return false;
        }

        return true;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
{
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
        struct htt_rx_desc *rxd;
        enum htt_rx_mpdu_status status;
        struct ieee80211_hdr *hdr;
        int num_mpdu_ranges;
        u32 attention;
        int fw_desc_len;
        u8 *fw_desc;
        bool channel_set;
        int i, j;
        int ret;

        lockdep_assert_held(&htt->rx_ring.lock);

        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
        fw_desc = (u8 *)&rx->fw_desc;

        num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

        /* Fill this once, while this is per-ppdu */
        if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
                memset(rx_status, 0, sizeof(*rx_status));
                rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
                                    rx->ppdu.combined_rssi;
        }

        if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
                /* TSF available only in 32-bit */
                rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
                rx_status->flag |= RX_FLAG_MACTIME_END;
        }

        channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);

        if (channel_set) {
                ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
                                      rx->ppdu.info0,
                                      __le32_to_cpu(rx->ppdu.info1),
                                      __le32_to_cpu(rx->ppdu.info2),
                                      rx_status);
        }

        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
                        rx, sizeof(*rx) +
                        (sizeof(struct htt_rx_indication_mpdu_range) *
                                num_mpdu_ranges));

        for (i = 0; i < num_mpdu_ranges; i++) {
                status = mpdu_ranges[i].mpdu_range_status;

                for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
                        struct sk_buff *msdu_head, *msdu_tail;

                        msdu_head = NULL;
                        msdu_tail = NULL;
                        ret = ath10k_htt_rx_amsdu_pop(htt,
                                                      &fw_desc,
                                                      &fw_desc_len,
                                                      &msdu_head,
                                                      &msdu_tail);

                        if (ret < 0) {
                                ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
                                            ret);
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        rxd = container_of((void *)msdu_head->data,
                                           struct htt_rx_desc,
                                           msdu_payload);
                        attention = __le32_to_cpu(rxd->attention.flags);

                        if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
                                                         status,
                                                         channel_set,
                                                         attention)) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        if (ret > 0 &&
                            ath10k_unchain_msdu(msdu_head) < 0) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }

                        if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
                                rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
                        else
                                rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;

                        if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
                                rx_status->flag |= RX_FLAG_MMIC_ERROR;
                        else
                                rx_status->flag &= ~RX_FLAG_MMIC_ERROR;

                        hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

                        if (ath10k_htt_rx_hdr_is_amsdu(hdr))
                                ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
                        else
                                ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
                }
        }

        tasklet_schedule(&htt->rx_replenish_task);
}
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                                       struct htt_rx_fragment_indication *frag)
{
        struct sk_buff *msdu_head, *msdu_tail;
        enum htt_rx_mpdu_encrypt_type enctype;
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;
        struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct ieee80211_hdr *hdr;
        int ret;
        bool tkip_mic_err;
        bool decrypt_err;
        u8 *fw_desc;
        int fw_desc_len, hdrlen, paramlen;
        int trim;

        fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
        fw_desc = (u8 *)frag->fw_msdu_rx_desc;

        msdu_head = NULL;
        msdu_tail = NULL;

        spin_lock_bh(&htt->rx_ring.lock);
        ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
                                      &msdu_head, &msdu_tail);
        spin_unlock_bh(&htt->rx_ring.lock);

        ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

        if (ret) {
                ath10k_warn("failed to pop amsdu from htt rx ring for fragmented rx %d\n",
                            ret);
                ath10k_htt_rx_free_msdu_chain(msdu_head);
                return;
        }

        /* FIXME: implement signal strength */

        hdr = (struct ieee80211_hdr *)msdu_head->data;
        rxd = (void *)msdu_head->data - sizeof(*rxd);
        tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
                                RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
        decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
                                RX_ATTENTION_FLAGS_DECRYPT_ERR);
        fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
                 RX_MSDU_START_INFO1_DECAP_FORMAT);

        if (fmt != RX_MSDU_DECAP_RAW) {
                ath10k_warn("we don't support non-raw fragmented rx yet\n");
                dev_kfree_skb_any(msdu_head);
                goto end;
        }

        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
        ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype);
        msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);

        if (tkip_mic_err)
                ath10k_warn("tkip mic error\n");

        if (decrypt_err) {
                ath10k_warn("decryption err in fragmented rx\n");
                dev_kfree_skb_any(msdu_head);
                goto end;
        }

        if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
                hdrlen = ieee80211_hdrlen(hdr->frame_control);
                paramlen = ath10k_htt_rx_crypto_param_len(enctype);

                /* It is more efficient to move the header than the payload */
                memmove((void *)msdu_head->data + paramlen,
                        (void *)msdu_head->data,
                        hdrlen);
                skb_pull(msdu_head, paramlen);
                hdr = (struct ieee80211_hdr *)msdu_head->data;
        }

        /* remove trailing FCS */
        trim = 4;

        /* remove crypto trailer */
        trim += ath10k_htt_rx_crypto_tail_len(enctype);

        /* last fragment of TKIP frags has MIC */
        if (!ieee80211_has_morefrags(hdr->frame_control) &&
            enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                trim += 8;

        if (trim > msdu_head->len) {
                ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
                dev_kfree_skb_any(msdu_head);
                goto end;
        }

        skb_trim(msdu_head, msdu_head->len - trim);

        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
                        msdu_head->data, msdu_head->len);

        ath10k_process_rx(htt->ar, rx_status, msdu_head);

end:
        if (fw_desc_len > 0) {
                ath10k_dbg(ATH10K_DBG_HTT,
                           "expecting more fragmented rx in one indication %d\n",
                           fw_desc_len);
        }
}
static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
                                       struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;
        struct htt_tx_done tx_done = {};
        int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
        __le16 msdu_id;
        int i;

        lockdep_assert_held(&htt->tx_lock);

        switch (status) {
        case HTT_DATA_TX_STATUS_NO_ACK:
                tx_done.no_ack = true;
                break;
        case HTT_DATA_TX_STATUS_OK:
                break;
        case HTT_DATA_TX_STATUS_DISCARD:
        case HTT_DATA_TX_STATUS_POSTPONE:
        case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
                tx_done.discard = true;
                break;
        default:
                ath10k_warn("unhandled tx completion status %d\n", status);
                tx_done.discard = true;
                break;
        }

        ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
                   resp->data_tx_completion.num_msdus);

        for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
                msdu_id = resp->data_tx_completion.msdus[i];
                tx_done.msdu_id = __le16_to_cpu(msdu_id);
                ath10k_txrx_tx_unref(htt, &tx_done);
        }
}
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;

        /* confirm alignment */
        if (!IS_ALIGNED((unsigned long)skb->data, 4))
                ath10k_warn("unaligned htt message, expect trouble\n");

        ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
                   resp->hdr.msg_type);
        switch (resp->hdr.msg_type) {
        case HTT_T2H_MSG_TYPE_VERSION_CONF: {
                htt->target_version_major = resp->ver_resp.major;
                htt->target_version_minor = resp->ver_resp.minor;
                complete(&htt->target_version_received);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IND:
                spin_lock_bh(&htt->rx_ring.lock);
                __skb_queue_tail(&htt->rx_compl_q, skb);
                spin_unlock_bh(&htt->rx_ring.lock);
                tasklet_schedule(&htt->txrx_compl_task);
                return;
        case HTT_T2H_MSG_TYPE_PEER_MAP: {
                struct htt_peer_map_event ev = {
                        .vdev_id = resp->peer_map.vdev_id,
                        .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
                };
                memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
                ath10k_peer_map_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
                struct htt_peer_unmap_event ev = {
                        .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
                };
                ath10k_peer_unmap_event(htt, &ev);
                break;
        }
        case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
                struct htt_tx_done tx_done = {};
                int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

                tx_done.msdu_id =
                        __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

                switch (status) {
                case HTT_MGMT_TX_STATUS_OK:
                        break;
                case HTT_MGMT_TX_STATUS_RETRY:
                        tx_done.no_ack = true;
                        break;
                case HTT_MGMT_TX_STATUS_DROP:
                        tx_done.discard = true;
                        break;
                }

                spin_lock_bh(&htt->tx_lock);
                ath10k_txrx_tx_unref(htt, &tx_done);
                spin_unlock_bh(&htt->tx_lock);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
                spin_lock_bh(&htt->tx_lock);
                __skb_queue_tail(&htt->tx_compl_q, skb);
                spin_unlock_bh(&htt->tx_lock);
                tasklet_schedule(&htt->txrx_compl_task);
                return;
        case HTT_T2H_MSG_TYPE_SEC_IND: {
                struct ath10k *ar = htt->ar;
                struct htt_security_indication *ev = &resp->security_indication;

                ath10k_dbg(ATH10K_DBG_HTT,
                           "sec ind peer_id %d unicast %d type %d\n",
                           __le16_to_cpu(ev->peer_id),
                           !!(ev->flags & HTT_SECURITY_IS_UNICAST),
                           MS(ev->flags, HTT_SECURITY_TYPE));
                complete(&ar->install_key_done);
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
                break;
        }
        case HTT_T2H_MSG_TYPE_TEST:
                /* FIX THIS */
                break;
        case HTT_T2H_MSG_TYPE_STATS_CONF:
                trace_ath10k_htt_stats(skb->data, skb->len);
                break;
        case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
        case HTT_T2H_MSG_TYPE_RX_ADDBA:
        case HTT_T2H_MSG_TYPE_RX_DELBA:
        case HTT_T2H_MSG_TYPE_RX_FLUSH:
        default:
                ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
                           resp->hdr.msg_type);
                ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                break;
        }

        /* Free the indication buffer */
        dev_kfree_skb_any(skb);
}
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
        struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
        struct htt_resp *resp;
        struct sk_buff *skb;

        spin_lock_bh(&htt->tx_lock);
        while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
                ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
                dev_kfree_skb_any(skb);
        }
        spin_unlock_bh(&htt->tx_lock);

        spin_lock_bh(&htt->rx_ring.lock);
        while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
                resp = (struct htt_resp *)skb->data;
                ath10k_htt_rx_handler(htt, &resp->rx_ind);
                dev_kfree_skb_any(skb);
        }
        spin_unlock_bh(&htt->rx_ring.lock);
}