2008-08-04 00:16:41 -07:00
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
2011-06-16 11:01:34 +00:00
# include <linux/dma-mapping.h>
2009-02-09 13:26:54 +05:30
# include "ath9k.h"
2010-04-15 17:39:28 -04:00
# include "ar9003_mac.h"
2008-08-04 00:16:41 -07:00
2013-10-11 23:30:52 +02:00
/* Stash/retrieve the driver's ath_rxbuf pointer in an skb's control buffer. */
# define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
2010-04-15 17:38:48 -04:00
2010-05-22 23:58:13 -07:00
static inline bool ath9k_check_auto_sleep ( struct ath_softc * sc )
{
return sc - > ps_enabled & &
( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_AUTOSLEEP ) ;
}
2008-08-04 00:16:41 -07:00
/*
* Setup and link descriptors .
*
* 11 N : we can no longer afford to self link the last descriptor .
* MAC acknowledges BA status as long as it copies frames to host
* buffer ( or rx fifo ) . This can incorrectly acknowledge packets
* to a sender if last desc is self - linked .
*/
2014-05-23 19:21:34 +02:00
/*
 * Attach one receive buffer to the hardware DMA chain.
 *
 * The descriptor becomes the new tail (null link). It is appended via the
 * previous descriptor's link word when a chain exists, otherwise handed
 * straight to the hardware — unless this is a flush.
 *
 * The tail is deliberately NOT self-linked: on 11n hardware the MAC
 * acknowledges BA status as long as it copies frames to the host, so a
 * self-linked last descriptor can incorrectly acknowledge packets.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
			    bool flush)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_desc *ds = bf->bf_desc;

	BUG_ON(skb == NULL);

	ds->ds_link = 0;		/* terminate the chain here */
	ds->ds_data = bf->bf_buf_addr;
	ds->ds_vdata = skb->data;	/* virtual start of the buffer */

	/* rx_bufsize tells the hardware how much it may DMA to us */
	ath9k_hw_setuprxdesc(ah, ds, common->rx_bufsize, 0);

	if (sc->rx.rxlink)
		*sc->rx.rxlink = bf->bf_daddr;	/* append after previous */
	else if (!flush)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr); /* empty chain: tell hw */

	sc->rx.rxlink = &ds->ds_link;
}
2014-05-23 19:21:34 +02:00
/*
 * Requeue with one buffer of hold-back: link the previously held buffer
 * into the DMA chain and keep @bf back until the next call. This keeps
 * the buffer currently being processed out of the hardware's reach.
 */
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
			      bool flush)
{
	struct ath_rxbuf *held = sc->rx.buf_hold;

	if (held != NULL)
		ath_rx_buf_link(sc, held, flush);

	sc->rx.buf_hold = bf;
}
2008-11-24 12:07:55 +05:30
/* Program the default RX antenna and reset the diversity counter. */
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);

	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;	/* restart "other antenna" vote count */
}
2008-08-04 00:16:41 -07:00
static void ath_opmode_init ( struct ath_softc * sc )
{
2009-02-09 13:27:12 +05:30
struct ath_hw * ah = sc - > sc_ah ;
2009-09-10 09:22:37 -07:00
struct ath_common * common = ath9k_hw_common ( ah ) ;
2008-08-04 00:16:41 -07:00
u32 rfilt , mfilt [ 2 ] ;
/* configure rx filter */
rfilt = ath_calcrxfilter ( sc ) ;
ath9k_hw_setrxfilter ( ah , rfilt ) ;
/* configure bssid mask */
2010-09-14 20:22:44 +02:00
ath_hw_setbssidmask ( common ) ;
2008-08-04 00:16:41 -07:00
/* configure operational mode */
ath9k_hw_setopmode ( ah ) ;
/* calculate and install multicast filter */
mfilt [ 0 ] = mfilt [ 1 ] = ~ 0 ;
ath9k_hw_setmcastfilter ( ah , mfilt [ 0 ] , mfilt [ 1 ] ) ;
}
2010-04-15 17:38:48 -04:00
static bool ath_rx_edma_buf_link ( struct ath_softc * sc ,
enum ath9k_rx_qtype qtype )
2008-08-04 00:16:41 -07:00
{
2010-04-15 17:38:48 -04:00
struct ath_hw * ah = sc - > sc_ah ;
struct ath_rx_edma * rx_edma ;
2008-08-04 00:16:41 -07:00
struct sk_buff * skb ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
rx_edma = & sc - > rx . rx_edma [ qtype ] ;
if ( skb_queue_len ( & rx_edma - > rx_fifo ) > = rx_edma - > rx_fifo_hwsize )
return false ;
2008-08-04 00:16:41 -07:00
2013-10-11 23:30:52 +02:00
bf = list_first_entry ( & sc - > rx . rxbuf , struct ath_rxbuf , list ) ;
2010-04-15 17:38:48 -04:00
list_del_init ( & bf - > list ) ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
skb = bf - > bf_mpdu ;
memset ( skb - > data , 0 , ah - > caps . rx_status_len ) ;
dma_sync_single_for_device ( sc - > dev , bf - > bf_buf_addr ,
ah - > caps . rx_status_len , DMA_TO_DEVICE ) ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
SKB_CB_ATHBUF ( skb ) = bf ;
ath9k_hw_addrxbuf_edma ( ah , bf - > bf_buf_addr , qtype ) ;
2013-04-23 12:22:18 +05:30
__skb_queue_tail ( & rx_edma - > rx_fifo , skb ) ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
return true ;
}
static void ath_rx_addbuffer_edma ( struct ath_softc * sc ,
2013-04-23 12:22:16 +05:30
enum ath9k_rx_qtype qtype )
2010-04-15 17:38:48 -04:00
{
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf , * tbf ;
2010-04-15 17:38:48 -04:00
if ( list_empty ( & sc - > rx . rxbuf ) ) {
2011-12-15 14:55:53 -08:00
ath_dbg ( common , QUEUE , " No free rx buf available \n " ) ;
2010-04-15 17:38:48 -04:00
return ;
2009-03-30 15:28:45 +05:30
}
2008-08-04 00:16:41 -07:00
2012-02-28 20:54:44 +05:30
list_for_each_entry_safe ( bf , tbf , & sc - > rx . rxbuf , list )
2010-04-15 17:38:48 -04:00
if ( ! ath_rx_edma_buf_link ( sc , qtype ) )
break ;
}
static void ath_rx_remove_buffer ( struct ath_softc * sc ,
enum ath9k_rx_qtype qtype )
{
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf ;
2010-04-15 17:38:48 -04:00
struct ath_rx_edma * rx_edma ;
struct sk_buff * skb ;
rx_edma = & sc - > rx . rx_edma [ qtype ] ;
2013-04-23 12:22:18 +05:30
while ( ( skb = __skb_dequeue ( & rx_edma - > rx_fifo ) ) ! = NULL ) {
2010-04-15 17:38:48 -04:00
bf = SKB_CB_ATHBUF ( skb ) ;
BUG_ON ( ! bf ) ;
list_add_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
}
}
static void ath_rx_edma_cleanup ( struct ath_softc * sc )
{
2011-09-23 14:33:14 +05:30
struct ath_hw * ah = sc - > sc_ah ;
struct ath_common * common = ath9k_hw_common ( ah ) ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf ;
2010-04-15 17:38:48 -04:00
ath_rx_remove_buffer ( sc , ATH9K_RX_QUEUE_LP ) ;
ath_rx_remove_buffer ( sc , ATH9K_RX_QUEUE_HP ) ;
2009-03-30 15:28:45 +05:30
list_for_each_entry ( bf , & sc - > rx . rxbuf , list ) {
2011-09-23 14:33:14 +05:30
if ( bf - > bf_mpdu ) {
dma_unmap_single ( sc - > dev , bf - > bf_buf_addr ,
common - > rx_bufsize ,
DMA_BIDIRECTIONAL ) ;
2010-04-15 17:38:48 -04:00
dev_kfree_skb_any ( bf - > bf_mpdu ) ;
2011-09-23 14:33:14 +05:30
bf - > bf_buf_addr = 0 ;
bf - > bf_mpdu = NULL ;
}
2010-04-15 17:38:48 -04:00
}
}
static void ath_rx_edma_init_queue ( struct ath_rx_edma * rx_edma , int size )
{
2013-08-14 21:15:57 +05:30
__skb_queue_head_init ( & rx_edma - > rx_fifo ) ;
2010-04-15 17:38:48 -04:00
rx_edma - > rx_fifo_hwsize = size ;
}
static int ath_rx_edma_init ( struct ath_softc * sc , int nbufs )
{
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
struct ath_hw * ah = sc - > sc_ah ;
struct sk_buff * skb ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf ;
2010-04-15 17:38:48 -04:00
int error = 0 , i ;
u32 size ;
ath9k_hw_set_rx_bufsize ( ah , common - > rx_bufsize -
ah - > caps . rx_status_len ) ;
ath_rx_edma_init_queue ( & sc - > rx . rx_edma [ ATH9K_RX_QUEUE_LP ] ,
ah - > caps . rx_lp_qdepth ) ;
ath_rx_edma_init_queue ( & sc - > rx . rx_edma [ ATH9K_RX_QUEUE_HP ] ,
ah - > caps . rx_hp_qdepth ) ;
2013-10-11 23:30:52 +02:00
size = sizeof ( struct ath_rxbuf ) * nbufs ;
2012-12-12 13:14:22 +01:00
bf = devm_kzalloc ( sc - > dev , size , GFP_KERNEL ) ;
2010-04-15 17:38:48 -04:00
if ( ! bf )
return - ENOMEM ;
INIT_LIST_HEAD ( & sc - > rx . rxbuf ) ;
for ( i = 0 ; i < nbufs ; i + + , bf + + ) {
2009-11-04 09:11:34 -08:00
skb = ath_rxbuf_alloc ( common , common - > rx_bufsize , GFP_KERNEL ) ;
2010-04-15 17:38:48 -04:00
if ( ! skb ) {
2009-03-30 15:28:45 +05:30
error = - ENOMEM ;
2010-04-15 17:38:48 -04:00
goto rx_init_fail ;
2008-08-04 00:16:41 -07:00
}
2010-04-15 17:38:48 -04:00
memset ( skb - > data , 0 , common - > rx_bufsize ) ;
2009-03-30 15:28:45 +05:30
bf - > bf_mpdu = skb ;
2010-04-15 17:38:48 -04:00
2009-03-30 15:28:45 +05:30
bf - > bf_buf_addr = dma_map_single ( sc - > dev , skb - > data ,
2009-11-04 09:11:34 -08:00
common - > rx_bufsize ,
2010-04-15 17:38:48 -04:00
DMA_BIDIRECTIONAL ) ;
2009-03-30 15:28:45 +05:30
if ( unlikely ( dma_mapping_error ( sc - > dev ,
2010-04-15 17:38:48 -04:00
bf - > bf_buf_addr ) ) ) {
dev_kfree_skb_any ( skb ) ;
bf - > bf_mpdu = NULL ;
2010-10-14 12:45:30 -07:00
bf - > bf_buf_addr = 0 ;
2010-12-02 19:12:36 -08:00
ath_err ( common ,
2010-04-15 17:38:48 -04:00
" dma_mapping_error() on RX init \n " ) ;
error = - ENOMEM ;
goto rx_init_fail ;
}
list_add_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
}
return 0 ;
rx_init_fail :
ath_rx_edma_cleanup ( sc ) ;
return error ;
}
static void ath_edma_start_recv ( struct ath_softc * sc )
{
ath9k_hw_rxena ( sc - > sc_ah ) ;
2013-04-23 12:22:16 +05:30
ath_rx_addbuffer_edma ( sc , ATH9K_RX_QUEUE_HP ) ;
ath_rx_addbuffer_edma ( sc , ATH9K_RX_QUEUE_LP ) ;
2010-04-15 17:38:48 -04:00
ath_opmode_init ( sc ) ;
2014-06-11 16:17:49 +05:30
ath9k_hw_startpcureceive ( sc - > sc_ah , sc - > cur_chan - > offchannel ) ;
2010-04-15 17:38:48 -04:00
}
/* Reclaim all EDMA RX buffers from both FIFOs back to the free list. */
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
int ath_rx_init ( struct ath_softc * sc , int nbufs )
{
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
struct sk_buff * skb ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf ;
2010-04-15 17:38:48 -04:00
int error = 0 ;
2010-10-26 15:27:24 -07:00
spin_lock_init ( & sc - > sc_pcu_lock ) ;
2010-04-15 17:38:48 -04:00
2011-01-26 18:23:27 +01:00
common - > rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
sc - > sc_ah - > caps . rx_status_len ;
2013-04-23 12:22:17 +05:30
if ( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA )
2010-04-15 17:38:48 -04:00
return ath_rx_edma_init ( sc , nbufs ) ;
2013-04-23 12:22:17 +05:30
ath_dbg ( common , CONFIG , " cachelsz %u rxbufsize %u \n " ,
common - > cachelsz , common - > rx_bufsize ) ;
2010-04-15 17:38:48 -04:00
2013-04-23 12:22:17 +05:30
/* Initialize rx descriptors */
error = ath_descdma_setup ( sc , & sc - > rx . rxdma , & sc - > rx . rxbuf ,
" rx " , nbufs , 1 , 0 ) ;
if ( error ! = 0 ) {
ath_err ( common ,
" failed to allocate rx descriptors: %d \n " ,
error ) ;
goto err ;
}
list_for_each_entry ( bf , & sc - > rx . rxbuf , list ) {
skb = ath_rxbuf_alloc ( common , common - > rx_bufsize ,
GFP_KERNEL ) ;
if ( skb = = NULL ) {
error = - ENOMEM ;
2009-03-30 15:28:45 +05:30
goto err ;
}
2010-04-15 17:38:48 -04:00
2013-04-23 12:22:17 +05:30
bf - > bf_mpdu = skb ;
bf - > bf_buf_addr = dma_map_single ( sc - > dev , skb - > data ,
common - > rx_bufsize ,
DMA_FROM_DEVICE ) ;
if ( unlikely ( dma_mapping_error ( sc - > dev ,
bf - > bf_buf_addr ) ) ) {
dev_kfree_skb_any ( skb ) ;
bf - > bf_mpdu = NULL ;
bf - > bf_buf_addr = 0 ;
ath_err ( common ,
" dma_mapping_error() on RX init \n " ) ;
error = - ENOMEM ;
goto err ;
2010-04-15 17:38:48 -04:00
}
2009-03-30 15:28:45 +05:30
}
2013-04-23 12:22:17 +05:30
sc - > rx . rxlink = NULL ;
2009-03-30 15:28:45 +05:30
err :
2008-08-04 00:16:41 -07:00
if ( error )
ath_rx_cleanup ( sc ) ;
return error ;
}
void ath_rx_cleanup ( struct ath_softc * sc )
{
2009-11-04 09:11:34 -08:00
struct ath_hw * ah = sc - > sc_ah ;
struct ath_common * common = ath9k_hw_common ( ah ) ;
2008-08-04 00:16:41 -07:00
struct sk_buff * skb ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
if ( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA ) {
ath_rx_edma_cleanup ( sc ) ;
return ;
2013-04-23 12:22:17 +05:30
}
list_for_each_entry ( bf , & sc - > rx . rxbuf , list ) {
skb = bf - > bf_mpdu ;
if ( skb ) {
dma_unmap_single ( sc - > dev , bf - > bf_buf_addr ,
common - > rx_bufsize ,
DMA_FROM_DEVICE ) ;
dev_kfree_skb ( skb ) ;
bf - > bf_buf_addr = 0 ;
bf - > bf_mpdu = NULL ;
2009-03-23 18:25:01 -04:00
}
2010-04-15 17:38:48 -04:00
}
2008-08-04 00:16:41 -07:00
}
/*
* Calculate the receive filter according to the
* operating mode and state :
*
* o always accept unicast , broadcast , and multicast traffic
* o maintain current state of phy error reception ( the hal
* may enable phy error frames for noise immunity work )
* o probe request frames are accepted only when operating in
* hostap , adhoc , or monitor modes
* o enable promiscuous mode according to the interface state
* o accept beacons :
* - when operating in adhoc mode so the 802.11 layer creates
* node table entries for peers ,
* - when operating in station mode for collecting rssi data when
* the station is otherwise quiet , or
* - when operating as a repeater so we see repeater - sta beacons
* - when scanning
*/
u32 ath_calcrxfilter ( struct ath_softc * sc )
{
2014-06-11 16:17:55 +05:30
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
2008-08-04 00:16:41 -07:00
u32 rfilt ;
2013-10-14 17:42:11 -07:00
if ( config_enabled ( CONFIG_ATH9K_TX99 ) )
return 0 ;
2011-10-08 15:49:57 +02:00
rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
2008-08-04 00:16:41 -07:00
| ATH9K_RX_FILTER_MCAST ;
2013-04-03 18:31:31 +02:00
/* if operating on a DFS channel, enable radar pulse detection */
if ( sc - > hw - > conf . radar_enabled )
rfilt | = ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR ;
2014-09-05 08:03:18 +05:30
spin_lock_bh ( & sc - > chan_lock ) ;
if ( sc - > cur_chan - > rxfilter & FIF_PROBE_REQ )
2008-08-04 00:16:41 -07:00
rfilt | = ATH9K_RX_FILTER_PROBEREQ ;
2011-03-09 01:48:12 +01:00
if ( sc - > sc_ah - > is_monitoring )
2008-08-04 00:16:41 -07:00
rfilt | = ATH9K_RX_FILTER_PROM ;
2014-09-16 02:13:13 +02:00
if ( ( sc - > cur_chan - > rxfilter & FIF_CONTROL ) | |
sc - > sc_ah - > dynack . enabled )
2009-02-04 08:10:22 +05:30
rfilt | = ATH9K_RX_FILTER_CONTROL ;
2009-02-19 15:41:52 +05:30
if ( ( sc - > sc_ah - > opmode = = NL80211_IFTYPE_STATION ) & &
2014-09-05 08:03:19 +05:30
( sc - > cur_chan - > nvifs < = 1 ) & &
2014-09-05 08:03:18 +05:30
! ( sc - > cur_chan - > rxfilter & FIF_BCN_PRBRESP_PROMISC ) )
2009-02-19 15:41:52 +05:30
rfilt | = ATH9K_RX_FILTER_MYBEACON ;
2015-09-17 14:03:46 +02:00
else if ( sc - > sc_ah - > opmode ! = NL80211_IFTYPE_OCB )
2008-08-04 00:16:41 -07:00
rfilt | = ATH9K_RX_FILTER_BEACON ;
2011-04-07 19:24:23 +02:00
if ( ( sc - > sc_ah - > opmode = = NL80211_IFTYPE_AP ) | |
2014-09-05 08:03:18 +05:30
( sc - > cur_chan - > rxfilter & FIF_PSPOLL ) )
2009-02-19 15:41:52 +05:30
rfilt | = ATH9K_RX_FILTER_PSPOLL ;
2008-11-18 09:05:55 +05:30
2014-09-05 08:03:17 +05:30
if ( sc - > cur_chandef . width ! = NL80211_CHAN_WIDTH_20_NOHT )
2009-09-03 12:08:43 +05:30
rfilt | = ATH9K_RX_FILTER_COMP_BAR ;
2014-09-05 08:03:19 +05:30
if ( sc - > cur_chan - > nvifs > 1 | | ( sc - > cur_chan - > rxfilter & FIF_OTHER_BSS ) ) {
2012-09-25 21:32:55 +05:30
/* This is needed for older chips */
if ( sc - > sc_ah - > hw_version . macVersion < = AR_SREV_VERSION_9160 )
2009-08-20 19:12:07 -07:00
rfilt | = ATH9K_RX_FILTER_PROM ;
2009-03-03 19:23:30 +02:00
rfilt | = ATH9K_RX_FILTER_MCAST_BCAST_ALL ;
}
2014-12-19 06:33:59 +05:30
if ( AR_SREV_9550 ( sc - > sc_ah ) | | AR_SREV_9531 ( sc - > sc_ah ) | |
AR_SREV_9561 ( sc - > sc_ah ) )
2012-07-03 19:13:33 +02:00
rfilt | = ATH9K_RX_FILTER_4ADDRESS ;
2015-08-20 09:21:38 +08:00
if ( AR_SREV_9462 ( sc - > sc_ah ) | | AR_SREV_9565 ( sc - > sc_ah ) )
rfilt | = ATH9K_RX_FILTER_CONTROL_WRAPPER ;
2014-08-22 20:39:31 +05:30
if ( ath9k_is_chanctx_enabled ( ) & &
2014-06-11 16:17:55 +05:30
test_bit ( ATH_OP_SCANNING , & common - > op_flags ) )
rfilt | = ATH9K_RX_FILTER_BEACON ;
2014-09-05 08:03:18 +05:30
spin_unlock_bh ( & sc - > chan_lock ) ;
2008-08-04 00:16:41 -07:00
return rfilt ;
2008-08-11 14:03:13 +05:30
2008-08-04 00:16:41 -07:00
}
2014-09-05 08:03:16 +05:30
void ath_startrecv ( struct ath_softc * sc )
2008-08-04 00:16:41 -07:00
{
2009-02-09 13:27:12 +05:30
struct ath_hw * ah = sc - > sc_ah ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf , * tbf ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
if ( ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA ) {
ath_edma_start_recv ( sc ) ;
2014-09-05 08:03:16 +05:30
return ;
2010-04-15 17:38:48 -04:00
}
2008-12-07 21:44:03 +05:30
if ( list_empty ( & sc - > rx . rxbuf ) )
2008-08-04 00:16:41 -07:00
goto start_recv ;
2013-08-10 15:59:15 +02:00
sc - > rx . buf_hold = NULL ;
2008-12-07 21:44:03 +05:30
sc - > rx . rxlink = NULL ;
list_for_each_entry_safe ( bf , tbf , & sc - > rx . rxbuf , list ) {
2014-05-23 19:21:34 +02:00
ath_rx_buf_link ( sc , bf , false ) ;
2008-08-04 00:16:41 -07:00
}
/* We could have deleted elements so the list may be empty now */
2008-12-07 21:44:03 +05:30
if ( list_empty ( & sc - > rx . rxbuf ) )
2008-08-04 00:16:41 -07:00
goto start_recv ;
2013-10-11 23:30:52 +02:00
bf = list_first_entry ( & sc - > rx . rxbuf , struct ath_rxbuf , list ) ;
2008-08-04 00:16:41 -07:00
ath9k_hw_putrxbuf ( ah , bf - > bf_daddr ) ;
2008-11-18 09:05:55 +05:30
ath9k_hw_rxena ( ah ) ;
2008-08-04 00:16:41 -07:00
start_recv :
2008-11-18 09:05:55 +05:30
ath_opmode_init ( sc ) ;
2014-06-11 16:17:49 +05:30
ath9k_hw_startpcureceive ( ah , sc - > cur_chan - > offchannel ) ;
2008-08-04 00:16:41 -07:00
}
2013-01-09 16:16:56 +01:00
static void ath_flushrecv ( struct ath_softc * sc )
{
if ( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA )
ath_rx_tasklet ( sc , 1 , true ) ;
ath_rx_tasklet ( sc , 1 , false ) ;
}
2008-08-04 00:16:41 -07:00
bool ath_stoprecv ( struct ath_softc * sc )
{
2009-02-09 13:27:12 +05:30
struct ath_hw * ah = sc - > sc_ah ;
2011-04-08 20:13:18 +02:00
bool stopped , reset = false ;
2008-08-04 00:16:41 -07:00
2010-11-20 03:08:47 +01:00
ath9k_hw_abortpcurecv ( ah ) ;
2008-11-18 09:05:55 +05:30
ath9k_hw_setrxfilter ( ah , 0 ) ;
2011-04-08 20:13:18 +02:00
stopped = ath9k_hw_stopdmarecv ( ah , & reset ) ;
2010-04-15 17:38:48 -04:00
2013-01-09 16:16:56 +01:00
ath_flushrecv ( sc ) ;
2010-04-15 17:38:48 -04:00
if ( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA )
ath_edma_stop_recv ( sc ) ;
else
sc - > rx . rxlink = NULL ;
2008-11-18 09:05:55 +05:30
2010-12-20 14:39:51 +05:30
if ( ! ( ah - > ah_flags & AH_UNPLUGGED ) & &
unlikely ( ! stopped ) ) {
2015-07-02 13:40:29 +02:00
ath_dbg ( ath9k_hw_common ( sc - > sc_ah ) , RESET ,
" Failed to stop Rx DMA \n " ) ;
RESET_STAT_INC ( sc , RESET_RX_DMA_ERROR ) ;
2010-12-06 13:13:07 -08:00
}
2011-04-15 00:41:43 +02:00
return stopped & & ! reset ;
2008-08-04 00:16:41 -07:00
}
2009-05-14 21:28:48 +03:00
static bool ath_beacon_dtim_pending_cab ( struct sk_buff * skb )
{
/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
struct ieee80211_mgmt * mgmt ;
u8 * pos , * end , id , elen ;
struct ieee80211_tim_ie * tim ;
mgmt = ( struct ieee80211_mgmt * ) skb - > data ;
pos = mgmt - > u . beacon . variable ;
end = skb - > data + skb - > len ;
while ( pos + 2 < end ) {
id = * pos + + ;
elen = * pos + + ;
if ( pos + elen > end )
break ;
if ( id = = WLAN_EID_TIM ) {
if ( elen < sizeof ( * tim ) )
break ;
tim = ( struct ieee80211_tim_ie * ) pos ;
if ( tim - > dtim_count ! = 0 )
break ;
return tim - > bitmap_ctrl & 0x01 ;
}
pos + = elen ;
}
return false ;
}
static void ath_rx_ps_beacon ( struct ath_softc * sc , struct sk_buff * skb )
{
2009-09-10 09:22:37 -07:00
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
2014-09-12 12:10:48 +05:30
bool skip_beacon = false ;
2009-05-14 21:28:48 +03:00
if ( skb - > len < 24 + 8 + 2 + 2 )
return ;
2010-01-08 10:36:05 +05:30
sc - > ps_flags & = ~ PS_WAIT_FOR_BEACON ;
2009-06-19 12:17:48 +02:00
2010-01-08 10:36:05 +05:30
if ( sc - > ps_flags & PS_BEACON_SYNC ) {
sc - > ps_flags & = ~ PS_BEACON_SYNC ;
2011-12-15 14:55:53 -08:00
ath_dbg ( common , PS ,
2013-02-04 15:38:24 +05:30
" Reconfigure beacon timers based on synchronized timestamp \n " ) ;
2014-09-12 12:10:48 +05:30
2014-09-16 07:19:27 +05:30
# ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
2014-09-12 12:10:48 +05:30
if ( ath9k_is_chanctx_enabled ( ) ) {
if ( sc - > cur_chan = = & sc - > offchannel . chan )
skip_beacon = true ;
}
2014-09-16 07:19:27 +05:30
# endif
2014-09-12 12:10:48 +05:30
if ( ! skip_beacon & &
! ( WARN_ON_ONCE ( sc - > cur_chan - > beacon . beacon_interval = = 0 ) ) )
2014-04-30 12:02:05 -07:00
ath9k_set_beacon ( sc ) ;
2014-08-22 20:39:30 +05:30
ath9k_p2p_beacon_sync ( sc ) ;
2009-05-20 21:59:08 +03:00
}
2009-05-14 21:28:48 +03:00
if ( ath_beacon_dtim_pending_cab ( skb ) ) {
/*
* Remain awake waiting for buffered broadcast / multicast
2009-06-17 20:53:20 +02:00
* frames . If the last broadcast / multicast frame is not
* received properly , the next beacon frame will work as
* a backup trigger for returning into NETWORK SLEEP state ,
* so we are waiting for it as well .
2009-05-14 21:28:48 +03:00
*/
2011-12-15 14:55:53 -08:00
ath_dbg ( common , PS ,
2010-12-02 19:12:37 -08:00
" Received DTIM beacon indicating buffered broadcast/multicast frame(s) \n " ) ;
2010-01-08 10:36:05 +05:30
sc - > ps_flags | = PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON ;
2009-05-14 21:28:48 +03:00
return ;
}
2010-01-08 10:36:05 +05:30
if ( sc - > ps_flags & PS_WAIT_FOR_CAB ) {
2009-05-14 21:28:48 +03:00
/*
* This can happen if a broadcast frame is dropped or the AP
* fails to send a frame indicating that all CAB frames have
* been delivered .
*/
2010-01-08 10:36:05 +05:30
sc - > ps_flags & = ~ PS_WAIT_FOR_CAB ;
2011-12-15 14:55:53 -08:00
ath_dbg ( common , PS , " PS wait for CAB frames timed out \n " ) ;
2009-05-14 21:28:48 +03:00
}
}
2011-09-26 22:16:56 +05:30
/*
 * Power-save bookkeeping on frame reception: dispatch our own beacons to
 * ath_rx_ps_beacon(), and clear the CAB / PS-Poll wait flags once the
 * corresponding frames have arrived.
 */
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
2010-04-15 17:38:48 -04:00
static bool ath_edma_get_buffers ( struct ath_softc * sc ,
2012-03-03 15:17:05 +01:00
enum ath9k_rx_qtype qtype ,
struct ath_rx_status * rs ,
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * * dest )
2008-08-04 00:16:41 -07:00
{
2010-04-15 17:38:48 -04:00
struct ath_rx_edma * rx_edma = & sc - > rx . rx_edma [ qtype ] ;
struct ath_hw * ah = sc - > sc_ah ;
struct ath_common * common = ath9k_hw_common ( ah ) ;
struct sk_buff * skb ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf ;
2010-04-15 17:38:48 -04:00
int ret ;
skb = skb_peek ( & rx_edma - > rx_fifo ) ;
if ( ! skb )
return false ;
bf = SKB_CB_ATHBUF ( skb ) ;
BUG_ON ( ! bf ) ;
2010-05-15 18:25:40 +08:00
dma_sync_single_for_cpu ( sc - > dev , bf - > bf_buf_addr ,
2010-04-15 17:38:48 -04:00
common - > rx_bufsize , DMA_FROM_DEVICE ) ;
2012-03-03 15:17:05 +01:00
ret = ath9k_hw_process_rxdesc_edma ( ah , rs , skb - > data ) ;
2010-05-15 18:25:40 +08:00
if ( ret = = - EINPROGRESS ) {
/*let device gain the buffer again*/
dma_sync_single_for_device ( sc - > dev , bf - > bf_buf_addr ,
common - > rx_bufsize , DMA_FROM_DEVICE ) ;
2010-04-15 17:38:48 -04:00
return false ;
2010-05-15 18:25:40 +08:00
}
2010-04-15 17:38:48 -04:00
__skb_unlink ( skb , & rx_edma - > rx_fifo ) ;
if ( ret = = - EINVAL ) {
/* corrupt descriptor, skip this one and the following one */
list_add_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
ath_rx_edma_buf_link ( sc , qtype ) ;
2012-03-03 15:17:05 +01:00
skb = skb_peek ( & rx_edma - > rx_fifo ) ;
if ( skb ) {
bf = SKB_CB_ATHBUF ( skb ) ;
BUG_ON ( ! bf ) ;
__skb_unlink ( skb , & rx_edma - > rx_fifo ) ;
list_add_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
ath_rx_edma_buf_link ( sc , qtype ) ;
}
2012-06-27 18:21:15 +01:00
bf = NULL ;
2010-04-15 17:38:48 -04:00
}
2012-03-03 15:17:05 +01:00
* dest = bf ;
2010-04-15 17:38:48 -04:00
return true ;
}
2008-08-04 00:16:41 -07:00
2013-10-11 23:30:52 +02:00
static struct ath_rxbuf * ath_edma_get_next_rx_buf ( struct ath_softc * sc ,
2010-04-15 17:38:48 -04:00
struct ath_rx_status * rs ,
enum ath9k_rx_qtype qtype )
{
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf = NULL ;
2010-04-15 17:38:48 -04:00
2012-03-03 15:17:05 +01:00
while ( ath_edma_get_buffers ( sc , qtype , rs , & bf ) ) {
if ( ! bf )
continue ;
2010-04-15 17:38:48 -04:00
2012-03-03 15:17:05 +01:00
return bf ;
}
return NULL ;
2010-04-15 17:38:48 -04:00
}
2013-10-11 23:30:52 +02:00
static struct ath_rxbuf * ath_get_next_rx_buf ( struct ath_softc * sc ,
2010-04-15 17:38:48 -04:00
struct ath_rx_status * rs )
{
struct ath_hw * ah = sc - > sc_ah ;
struct ath_common * common = ath9k_hw_common ( ah ) ;
2008-08-04 00:16:41 -07:00
struct ath_desc * ds ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * bf ;
2010-04-15 17:38:48 -04:00
int ret ;
if ( list_empty ( & sc - > rx . rxbuf ) ) {
sc - > rx . rxlink = NULL ;
return NULL ;
}
2013-10-11 23:30:52 +02:00
bf = list_first_entry ( & sc - > rx . rxbuf , struct ath_rxbuf , list ) ;
2013-08-10 15:59:15 +02:00
if ( bf = = sc - > rx . buf_hold )
return NULL ;
2010-04-15 17:38:48 -04:00
ds = bf - > bf_desc ;
/*
* Must provide the virtual address of the current
* descriptor , the physical address , and the virtual
* address of the next descriptor in the h / w chain .
* This allows the HAL to look ahead to see if the
* hardware is done with a descriptor by checking the
* done bit in the following descriptor and the address
* of the current descriptor the DMA engine is working
* on . All this is necessary because of our use of
* a self - linked list to avoid rx overruns .
*/
2011-08-13 10:28:11 +05:30
ret = ath9k_hw_rxprocdesc ( ah , ds , rs ) ;
2010-04-15 17:38:48 -04:00
if ( ret = = - EINPROGRESS ) {
struct ath_rx_status trs ;
2013-10-11 23:30:52 +02:00
struct ath_rxbuf * tbf ;
2010-04-15 17:38:48 -04:00
struct ath_desc * tds ;
memset ( & trs , 0 , sizeof ( trs ) ) ;
if ( list_is_last ( & bf - > list , & sc - > rx . rxbuf ) ) {
sc - > rx . rxlink = NULL ;
return NULL ;
}
2013-10-11 23:30:52 +02:00
tbf = list_entry ( bf - > list . next , struct ath_rxbuf , list ) ;
2010-04-15 17:38:48 -04:00
/*
* On some hardware the descriptor status words could
* get corrupted , including the done bit . Because of
* this , check if the next descriptor ' s done bit is
* set or not .
*
* If the next descriptor ' s done bit is set , the current
* descriptor has been corrupted . Force s / w to discard
* this descriptor and continue . . .
*/
tds = tbf - > bf_desc ;
2011-08-13 10:28:11 +05:30
ret = ath9k_hw_rxprocdesc ( ah , tds , & trs ) ;
2010-04-15 17:38:48 -04:00
if ( ret = = - EINPROGRESS )
return NULL ;
2013-04-08 00:04:11 +02:00
/*
2014-02-24 22:26:06 +01:00
* Re - check previous descriptor , in case it has been filled
* in the mean time .
2013-04-08 00:04:11 +02:00
*/
2014-02-24 22:26:06 +01:00
ret = ath9k_hw_rxprocdesc ( ah , ds , rs ) ;
if ( ret = = - EINPROGRESS ) {
/*
* mark descriptor as zero - length and set the ' more '
* flag to ensure that both buffers get discarded
*/
rs - > rs_datalen = 0 ;
rs - > rs_more = true ;
}
2010-04-15 17:38:48 -04:00
}
2013-01-09 16:16:52 +01:00
list_del ( & bf - > list ) ;
2010-04-15 17:38:48 -04:00
if ( ! bf - > bf_mpdu )
return bf ;
/*
* Synchronize the DMA transfer with CPU before
* 1. accessing the frame
* 2. requeueing the same buffer to h / w
*/
2010-05-15 18:25:40 +08:00
dma_sync_single_for_cpu ( sc - > dev , bf - > bf_buf_addr ,
2010-04-15 17:38:48 -04:00
common - > rx_bufsize ,
DMA_FROM_DEVICE ) ;
return bf ;
}
2013-08-14 09:11:13 +05:30
/*
 * Build a 64-bit mactime from the 32-bit hardware RX timestamp and the
 * current TSF, compensating by one 2^32 period when the timestamp was
 * latched on the far side of a TSF low-word wraparound.
 */
static void ath9k_process_tsf(struct ath_rx_status *rs,
			      struct ieee80211_rx_status *rxs,
			      u64 tsf)
{
	u32 tsf_lower = tsf & 0xffffffff;

	rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;

	/* timestamp taken just before the TSF low word wrapped */
	if (rs->rs_tstamp > tsf_lower &&
	    unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
		rxs->mactime -= 0x100000000ULL;

	/* timestamp taken just after a wrap */
	if (rs->rs_tstamp < tsf_lower &&
	    unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
		rxs->mactime += 0x100000000ULL;
}
2010-05-20 15:34:38 +05:30
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 *
 * Validate a received frame and fill in its ieee80211_rx_status.
 * Returns 0 when the frame should be delivered to mac80211, -EINVAL
 * when it must be dropped.  On descriptor corruption, sc->rx.discard_next
 * is set from rs_more so that the follow-up buffer of a chained frame
 * is discarded as well.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	bool discard_current = sc->rx.discard_next;

	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
	if (discard_current)
		goto corrupt;

	sc->rx.discard_next = false;

	/*
	 * Discard zero-length packets.
	 */
	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		goto corrupt;
	}

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;

	/* The 802.11 header starts after the (possibly empty) HW RX status. */
	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	ath9k_process_tsf(rx_stats, rx_status, tsf);
	ath_debug_stat_rx(sc, rx_stats);

	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
		if (ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, rx_status->mactime))
			RX_STAT_INC(rx_spectral);

		return -EINVAL;
	}

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	/* chan_lock protects cur_chan->rxfilter against channel switches */
	spin_lock_bh(&sc->chan_lock);
	if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error,
				 sc->cur_chan->rxfilter)) {
		spin_unlock_bh(&sc->chan_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&sc->chan_lock);

	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(rx_beacons);
		rx_stats->is_mybeacon = true;
	}

	/*
	 * This shouldn't happen, but have a safety check anyway.
	 */
	if (WARN_ON(!ah->curchan))
		return -EINVAL;

	if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
		/*
		 * No valid hardware bitrate found -- we should not get here
		 * because hardware has already validated this frame as OK.
		 */
		ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
			rx_stats->rs_rate);
		RX_STAT_INC(rx_rate_err);
		return -EINVAL;
	}

	/* Notify the channel-context machinery when one of our beacons arrives. */
	if (ath9k_is_chanctx_enabled()) {
		if (rx_stats->is_mybeacon)
			ath_chanctx_beacon_recv_ev(sc,
					   ATH_CHANCTX_EVENT_BEACON_RECEIVED);
	}

	ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);

	rx_status->band = ah->curchan->chan->band;
	rx_status->freq = ah->curchan->chan->center_freq;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	/* Count data frames (excluding QoS-null) for BT coexistence tuning. */
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->rx.num_pkts++;
#endif

	return 0;

corrupt:
	/* For a chained (rs_more) frame, also discard the next buffer. */
	sc->rx.discard_next = rx_stats->rs_more;
	return -EINVAL;
}
2013-08-14 21:15:56 +05:30
/*
* Run the LNA combining algorithm only in these cases :
*
* Standalone WLAN cards with both LNA / Antenna diversity
* enabled in the EEPROM .
*
* WLAN + BT cards which are in the supported card list
* in ath_pci_id_table and the user has loaded the
* driver with " bt_ant_diversity " set to true .
*/
static void ath9k_antenna_check ( struct ath_softc * sc ,
struct ath_rx_status * rs )
{
struct ath_hw * ah = sc - > sc_ah ;
struct ath9k_hw_capabilities * pCap = & ah - > caps ;
struct ath_common * common = ath9k_hw_common ( ah ) ;
if ( ! ( ah - > caps . hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB ) )
return ;
/*
* Change the default rx antenna if rx diversity
* chooses the other antenna 3 times in a row .
*/
if ( sc - > rx . defant ! = rs - > rs_antenna ) {
if ( + + sc - > rx . rxotherant > = 3 )
ath_setdefantenna ( sc , rs - > rs_antenna ) ;
} else {
sc - > rx . rxotherant = 0 ;
}
if ( pCap - > hw_caps & ATH9K_HW_CAP_BT_ANT_DIV ) {
if ( common - > bt_ant_diversity )
ath_ant_comb_scan ( sc , rs ) ;
} else {
ath_ant_comb_scan ( sc , rs ) ;
}
}
2013-01-30 23:37:41 +01:00
static void ath9k_apply_ampdu_details ( struct ath_softc * sc ,
struct ath_rx_status * rs , struct ieee80211_rx_status * rxs )
{
if ( rs - > rs_isaggr ) {
rxs - > flag | = RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN ;
rxs - > ampdu_reference = sc - > rx . ampdu_ref ;
if ( ! rs - > rs_moreaggr ) {
rxs - > flag | = RX_FLAG_AMPDU_IS_LAST ;
sc - > rx . ampdu_ref + + ;
}
if ( rs - > rs_flags & ATH9K_RX_DELIM_CRC_PRE )
rxs - > flag | = RX_FLAG_AMPDU_DELIM_CRC_ERROR ;
}
}
2010-04-15 17:38:48 -04:00
/*
 * Main RX processing loop: drain completed RX buffers from the hardware,
 * hand good frames to mac80211 and requeue the buffers.
 *
 * @flush: non-zero when draining the queue without re-arming RX
 *         (buffers are requeued but not re-linked to the hardware).
 * @hp:    for EDMA chips, selects the high-priority RX queue;
 *         ignored for legacy DMA.
 *
 * Always returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_rxbuf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;
	/* Cap the number of frames handled per invocation. */
	unsigned int budget = 512;
	struct ieee80211_hdr *hdr;

	/*
	 * EDMA hardware writes RX status back into the same buffer,
	 * so the mapping must be bidirectional.
	 */
	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	/* Snapshot the TSF once; used to extend each frame's 32-bit timestamp. */
	tsf = ath9k_hw_gettsf64(ah);

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
						 &decrypt_error, tsf);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		/* Hand the freshly mapped replacement buffer to this bf. */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		/* Expose the received payload; strip the leading HW status area. */
		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		/* Post-process (decrypt/MIC handling) only on the final fragment. */
		if (!rs.rs_more)
			ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
						     rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		/* Final fragment: merge its data into the header skb. */
		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		/* Drop the 8-byte Michael MIC that hardware left in the frame. */
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		/* Feed the powersave state machine while waiting to sleep. */
		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		ath9k_antenna_check(sc, &rs);
		ath9k_apply_ampdu_details(sc, &rs, rxs);
		ath_debug_rate_stats(sc, &rs, skb);

		/* Sample ACK timestamps for dynamic ACK timeout estimation. */
		hdr = (struct ieee80211_hdr *)skb->data;
		if (ieee80211_is_ack(hdr->frame_control))
			ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		/* Error path: abandon any partially assembled frame. */
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);

		/* Re-arm the buffer with hardware unless we are flushing. */
		if (!edma) {
			ath_rx_buf_relink(sc, bf, flush);
			if (!flush)
				ath9k_hw_rxena(ah);
		} else if (!flush) {
			ath_rx_edma_buf_link(sc, qtype);
		}

		if (!budget--)
			break;
	} while (1);

	/* Re-enable RXEOL/RXORN interrupts if they were masked off. */
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}