/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}
static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
					      int curr_main_set, int curr_alt_set,
					      int alt_rssi_avg, int main_rssi_avg)
{
	bool result = false;

	switch (div_group) {
	case 0:
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
			result = true;
		break;
	case 1:
	case 2:
		if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (alt_rssi_avg >= (main_rssi_avg - 5))) ||
		     ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
		      (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
		      (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
		    (alt_rssi_avg >= 4))
			result = true;
		else
			result = false;
		break;
	}

	return result;
}
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
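
/*
 * Take the first free buffer from sc->rx.rxbuf, clear its status area and
 * hand it to the hardware rx FIFO of the given EDMA queue. Returns false
 * when the FIFO is already full.
 */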
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
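
/* Fill the rx FIFO of the given queue with up to 'size' buffers from sc->rx.rxbuf. */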
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
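
/*
 * Tear down EDMA rx state: drain both rx FIFOs back onto sc->rx.rxbuf,
 * free the attached skbs and release the buffer array.
 */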
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
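
/* Enable rx and prime both the HP and LP FIFOs before starting the PCU receive engine. */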
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
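
/*
 * Allocate and DMA-map the rx buffers. Chips with ATH9K_HW_CAP_EDMA use the
 * FIFO-based path in ath_rx_edma_init(); older chips use a descriptor list
 * that is linked later in ath_startrecv().
 */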
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
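
/* Undo ath_rx_init(): unmap and free the rx buffers and descriptors. */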
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}
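
/*
 * (Re)start reception: on non-EDMA chips, relink all rx descriptors and
 * hand the first one to the hardware before enabling the PCU receive engine.
 */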
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}
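
/*
 * Stop reception. Returns true only if rx DMA stopped cleanly and the
 * hardware did not request a reset.
 */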
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}
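
/* Drain any frames pending in the rx queue(s) while SC_OP_RXFLUSH is set. */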
void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
		/* TODO:  This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return; /* not from our current AP */
	}

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, ATH_DBG_PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, ATH_DBG_PS,
			"PS wait for CAB frames timed out\n");
	}
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, ATH_DBG_PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
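
/*
 * Move completed frames from the EDMA rx FIFO into rx_edma->rx_buffers.
 * Returns false once the buffer at the head of the FIFO is still owned by
 * the hardware (or the FIFO is empty).
 */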
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}
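
/*
 * Legacy (non-EDMA) path: return the first rx buffer whose descriptor the
 * hardware has completed, or NULL if nothing is ready yet.
 */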
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && !(rx_stats->rs_status &
		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC));

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}
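
/*
 * Map the hardware rate code into a mac80211 rate index and set the
 * HT/40MHz/short-GI/short-preamble flags accordingly.
 */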
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ATH_DBG_XMIT,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}
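
/*
 * Track a low-pass filtered RSSI from beacons of our current BSS; the
 * result feeds the ANI logic via ah->stats.avgbrssi.
 */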
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	if ((ah->opmode != NL80211_IFTYPE_STATION) &&
	    (ah->opmode != NL80211_IFTYPE_ADHOC))
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid)) {
		/* TODO:  This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return;
	}

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	return 0;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
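
/*
 * Antenna diversity helper: based on the current main/alt LNA combination,
 * pick the main configuration and the two alternative configurations to
 * probe during the quick scan.
 */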
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case 0x10: /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x20: /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case 0x21: /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x12: /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x13: /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x23: /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			      (alt_rssi_avg > main_rssi_avg +
			       ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			     (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				     ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			     ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}
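
/*
 * Program the fast diversity bias (and, for div_group 1/2, the main/alt
 * gain table indices) for the chosen main/alt LNA combination.
 */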
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
					  struct ath_ant_comb *antcomb,
					  int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 1) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}
/* Antenna diversity and combining */
static void ath_ant_comb_scan ( struct ath_softc * sc , struct ath_rx_status * rs )
{
struct ath_hw_antcomb_conf div_ant_conf ;
struct ath_ant_comb * antcomb = & sc - > ant_comb ;
int alt_ratio = 0 , alt_rssi_avg = 0 , main_rssi_avg = 0 , curr_alt_set ;
2011-04-20 11:00:34 +05:30
int curr_main_set ;
2010-09-02 01:34:43 -07:00
int main_rssi = rs - > rs_rssi_ctl0 ;
int alt_rssi = rs - > rs_rssi_ctl1 ;
int rx_ant_conf , main_ant_conf ;
bool short_scan = false ;
rx_ant_conf = ( rs - > rs_rssi_ctl2 > > ATH_ANT_RX_CURRENT_SHIFT ) &
ATH_ANT_RX_MASK ;
main_ant_conf = ( rs - > rs_rssi_ctl2 > > ATH_ANT_RX_MAIN_SHIFT ) &
ATH_ANT_RX_MASK ;
2011-05-13 20:31:40 +05:30
/* Record packet only when both main_rssi and alt_rssi is positive */
if ( main_rssi > 0 & & alt_rssi > 0 ) {
2010-09-02 01:34:43 -07:00
antcomb - > total_pkt_count + + ;
antcomb - > main_total_rssi + = main_rssi ;
antcomb - > alt_total_rssi + = alt_rssi ;
if ( main_ant_conf = = rx_ant_conf )
antcomb - > main_recv_cnt + + ;
else
antcomb - > alt_recv_cnt + + ;
}
/* Short scan check */
if ( antcomb - > scan & & antcomb - > alt_good ) {
if ( time_after ( jiffies , antcomb - > scan_start_time +
msecs_to_jiffies ( ATH_ANT_DIV_COMB_SHORT_SCAN_INTR ) ) )
short_scan = true ;
else
if ( antcomb - > total_pkt_count = =
ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT ) {
alt_ratio = ( ( antcomb - > alt_recv_cnt * 100 ) /
antcomb - > total_pkt_count ) ;
if ( alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO )
short_scan = true ;
}
}
if ( ( ( antcomb - > total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT ) | |
rs - > rs_moreaggr ) & & ! short_scan )
return ;
if ( antcomb - > total_pkt_count ) {
alt_ratio = ( ( antcomb - > alt_recv_cnt * 100 ) /
antcomb - > total_pkt_count ) ;
main_rssi_avg = ( antcomb - > main_total_rssi /
antcomb - > total_pkt_count ) ;
alt_rssi_avg = ( antcomb - > alt_total_rssi /
antcomb - > total_pkt_count ) ;
}
ath9k_hw_antdiv_comb_conf_get ( sc - > sc_ah , & div_ant_conf ) ;
curr_alt_set = div_ant_conf . alt_lna_conf ;
curr_main_set = div_ant_conf . main_lna_conf ;
antcomb - > count + + ;
if ( antcomb - > count = = ATH_ANT_DIV_COMB_MAX_COUNT ) {
if ( alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO ) {
ath_lnaconf_alt_good_scan ( antcomb , div_ant_conf ,
main_rssi_avg ) ;
antcomb - > alt_good = true ;
} else {
antcomb - > alt_good = false ;
}
antcomb - > count = 0 ;
antcomb - > scan = true ;
antcomb - > scan_not_start = true ;
}
	if (!antcomb->scan) {
		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
					       alt_ratio, curr_main_set,
					       curr_alt_set, alt_rssi_avg,
					       main_rssi_avg)) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if (alt_rssi_avg < (main_rssi_avg +
				    div_ant_conf.lna1_lna2_delta))
			goto div_comb_done;
	}
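
	/*
	 * Quick scan: across successive calls the alternate LNA configuration
	 * is stepped through LNA1/LNA2, LNA1+LNA2 ("A+B") and LNA1-LNA2
	 * ("A-B"), and an RSSI average is recorded for each step
	 * (rssi_lna1/rssi_lna2/rssi_add/rssi_sub).  Once the last measurement
	 * is in, the best combination is chosen here and refined by
	 * ath_select_ant_div_from_quick_scan() below.
	 */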
	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			     ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		}
	}
	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

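	/*
	 * The full 64-bit TSF is sampled once per tasklet run; every frame in
	 * this batch is then stamped against the same reference when its
	 * 32-bit rs_tstamp is extended to a 64-bit mactime inside the loop.
	 */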
	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;
		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);

		ath_debug_stat_rx(sc, &rs);
		/*
		 * If we're asked to flush the receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue_drop_frag;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;
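
		/*
		 * The two corrections above handle the case where the 32-bit
		 * rs_tstamp and tsf_lower sit on opposite sides of a 32-bit
		 * wraparound.  E.g. if tsf_lower has just wrapped to a small
		 * value while rs_tstamp is still close to 0xffffffff, the
		 * frame belongs to the previous 2^32 window, so 2^32 is
		 * subtracted; the mirror case adds 2^32.  A difference larger
		 * than 0x10000000 is taken as "wrapped" rather than as a
		 * genuinely old timestamp.
		 */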
		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue_drop_frag;
		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

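		/*
		 * On chips that prepend a self-describing RX status to the
		 * frame data (rx_status_len != 0, as on the EDMA path),
		 * account for those bytes when sizing the skb and then strip
		 * them so that skb->data points at the 802.11 header.
		 */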
		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			sc->rx.frag = NULL;

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				goto requeue_drop_frag;
			}

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}
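
		/*
		 * At this point skb refers to the fully reassembled frame
		 * (hdr_skb): the first buffer supplied the 802.11 header, any
		 * later buffers were appended to it above, and the rest of
		 * the RX path treats it like any single-buffer frame.
		 */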
		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

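		/*
		 * RX_FLAG_MMIC_STRIPPED tells mac80211 that the 8-byte Michael
		 * MIC is not part of the payload it receives; trim those 8
		 * bytes here so the flag matches the frame that is handed up.
		 */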
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

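	/*
	 * If RXEOL was masked (typically done in the interrupt path when the
	 * RX queue runs out of buffers, to avoid an interrupt storm),
	 * re-enable RXEOL and RXORN now that buffers have been processed and
	 * requeued.
	 */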
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah, ah->imask);
	}

	return 0;
}