/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
2009-02-09 13:26:54 +05:30
# include "ath9k.h"
2010-04-15 17:39:28 -04:00
# include "ar9003_mac.h"
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
# define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
2010-05-22 23:58:13 -07:00
static inline bool ath9k_check_auto_sleep ( struct ath_softc * sc )
{
return sc - > ps_enabled & &
( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_AUTOSLEEP ) ;
}
2009-03-03 19:23:28 +02:00
static struct ieee80211_hw * ath_get_virt_hw ( struct ath_softc * sc ,
struct ieee80211_hdr * hdr )
{
2009-03-03 19:23:29 +02:00
struct ieee80211_hw * hw = sc - > pri_wiphy - > hw ;
int i ;
spin_lock_bh ( & sc - > wiphy_lock ) ;
for ( i = 0 ; i < sc - > num_sec_wiphy ; i + + ) {
struct ath_wiphy * aphy = sc - > sec_wiphy [ i ] ;
if ( aphy = = NULL )
continue ;
if ( compare_ether_addr ( hdr - > addr1 , aphy - > hw - > wiphy - > perm_addr )
= = 0 ) {
hw = aphy - > hw ;
break ;
}
}
spin_unlock_bh ( & sc - > wiphy_lock ) ;
return hw ;
2009-03-03 19:23:28 +02:00
}
2008-08-04 00:16:41 -07:00
/*
* Setup and link descriptors .
*
* 11 N : we can no longer afford to self link the last descriptor .
* MAC acknowledges BA status as long as it copies frames to host
* buffer ( or rx fifo ) . This can incorrectly acknowledge packets
* to a sender if last desc is self - linked .
*/
static void ath_rx_buf_link ( struct ath_softc * sc , struct ath_buf * bf )
{
2009-02-09 13:27:12 +05:30
struct ath_hw * ah = sc - > sc_ah ;
2009-11-04 09:11:34 -08:00
struct ath_common * common = ath9k_hw_common ( ah ) ;
2008-08-04 00:16:41 -07:00
struct ath_desc * ds ;
struct sk_buff * skb ;
ATH_RXBUF_RESET ( bf ) ;
ds = bf - > bf_desc ;
2008-11-18 09:05:55 +05:30
ds - > ds_link = 0 ; /* link to null */
2008-08-04 00:16:41 -07:00
ds - > ds_data = bf - > bf_buf_addr ;
2008-11-18 09:05:55 +05:30
/* virtual addr of the beginning of the buffer. */
2008-08-04 00:16:41 -07:00
skb = bf - > bf_mpdu ;
2009-09-13 23:28:00 -07:00
BUG_ON ( skb = = NULL ) ;
2008-08-04 00:16:41 -07:00
ds - > ds_vdata = skb - > data ;
2009-11-04 09:11:34 -08:00
/*
* setup rx descriptors . The rx_bufsize here tells the hardware
2008-11-20 17:15:13 -08:00
* how much data it can DMA to us and that we are prepared
2009-11-04 09:11:34 -08:00
* to process
*/
2008-12-07 21:44:03 +05:30
ath9k_hw_setuprxdesc ( ah , ds ,
2009-11-04 09:11:34 -08:00
common - > rx_bufsize ,
2008-08-04 00:16:41 -07:00
0 ) ;
2008-12-07 21:44:03 +05:30
if ( sc - > rx . rxlink = = NULL )
2008-08-04 00:16:41 -07:00
ath9k_hw_putrxbuf ( ah , bf - > bf_daddr ) ;
else
2008-12-07 21:44:03 +05:30
* sc - > rx . rxlink = bf - > bf_daddr ;
2008-08-04 00:16:41 -07:00
2008-12-07 21:44:03 +05:30
sc - > rx . rxlink = & ds - > ds_link ;
2008-08-04 00:16:41 -07:00
ath9k_hw_rxena ( ah ) ;
}
2008-11-24 12:07:55 +05:30
/* Select the default RX antenna and reset the diversity counter. */
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}
2008-08-04 00:16:41 -07:00
static void ath_opmode_init ( struct ath_softc * sc )
{
2009-02-09 13:27:12 +05:30
struct ath_hw * ah = sc - > sc_ah ;
2009-09-10 09:22:37 -07:00
struct ath_common * common = ath9k_hw_common ( ah ) ;
2008-08-04 00:16:41 -07:00
u32 rfilt , mfilt [ 2 ] ;
/* configure rx filter */
rfilt = ath_calcrxfilter ( sc ) ;
ath9k_hw_setrxfilter ( ah , rfilt ) ;
/* configure bssid mask */
2009-02-09 13:27:26 +05:30
if ( ah - > caps . hw_caps & ATH9K_HW_CAP_BSSIDMASK )
2009-09-10 17:52:45 -07:00
ath_hw_setbssidmask ( common ) ;
2008-08-04 00:16:41 -07:00
/* configure operational mode */
ath9k_hw_setopmode ( ah ) ;
/* Handle any link-level address change. */
2009-09-10 09:22:37 -07:00
ath9k_hw_setmac ( ah , common - > macaddr ) ;
2008-08-04 00:16:41 -07:00
/* calculate and install multicast filter */
mfilt [ 0 ] = mfilt [ 1 ] = ~ 0 ;
ath9k_hw_setmcastfilter ( ah , mfilt [ 0 ] , mfilt [ 1 ] ) ;
}
2010-04-15 17:38:48 -04:00
static bool ath_rx_edma_buf_link ( struct ath_softc * sc ,
enum ath9k_rx_qtype qtype )
2008-08-04 00:16:41 -07:00
{
2010-04-15 17:38:48 -04:00
struct ath_hw * ah = sc - > sc_ah ;
struct ath_rx_edma * rx_edma ;
2008-08-04 00:16:41 -07:00
struct sk_buff * skb ;
struct ath_buf * bf ;
2010-04-15 17:38:48 -04:00
rx_edma = & sc - > rx . rx_edma [ qtype ] ;
if ( skb_queue_len ( & rx_edma - > rx_fifo ) > = rx_edma - > rx_fifo_hwsize )
return false ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
bf = list_first_entry ( & sc - > rx . rxbuf , struct ath_buf , list ) ;
list_del_init ( & bf - > list ) ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
skb = bf - > bf_mpdu ;
ATH_RXBUF_RESET ( bf ) ;
memset ( skb - > data , 0 , ah - > caps . rx_status_len ) ;
dma_sync_single_for_device ( sc - > dev , bf - > bf_buf_addr ,
ah - > caps . rx_status_len , DMA_TO_DEVICE ) ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
SKB_CB_ATHBUF ( skb ) = bf ;
ath9k_hw_addrxbuf_edma ( ah , bf - > bf_buf_addr , qtype ) ;
skb_queue_tail ( & rx_edma - > rx_fifo , skb ) ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
return true ;
}
static void ath_rx_addbuffer_edma ( struct ath_softc * sc ,
enum ath9k_rx_qtype qtype , int size )
{
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
u32 nbuf = 0 ;
if ( list_empty ( & sc - > rx . rxbuf ) ) {
ath_print ( common , ATH_DBG_QUEUE , " No free rx buf available \n " ) ;
return ;
2009-03-30 15:28:45 +05:30
}
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
while ( ! list_empty ( & sc - > rx . rxbuf ) ) {
nbuf + + ;
if ( ! ath_rx_edma_buf_link ( sc , qtype ) )
break ;
if ( nbuf > = size )
break ;
}
}
static void ath_rx_remove_buffer ( struct ath_softc * sc ,
enum ath9k_rx_qtype qtype )
{
struct ath_buf * bf ;
struct ath_rx_edma * rx_edma ;
struct sk_buff * skb ;
rx_edma = & sc - > rx . rx_edma [ qtype ] ;
while ( ( skb = skb_dequeue ( & rx_edma - > rx_fifo ) ) ! = NULL ) {
bf = SKB_CB_ATHBUF ( skb ) ;
BUG_ON ( ! bf ) ;
list_add_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
}
}
static void ath_rx_edma_cleanup ( struct ath_softc * sc )
{
struct ath_buf * bf ;
ath_rx_remove_buffer ( sc , ATH9K_RX_QUEUE_LP ) ;
ath_rx_remove_buffer ( sc , ATH9K_RX_QUEUE_HP ) ;
2009-03-30 15:28:45 +05:30
list_for_each_entry ( bf , & sc - > rx . rxbuf , list ) {
2010-04-15 17:38:48 -04:00
if ( bf - > bf_mpdu )
dev_kfree_skb_any ( bf - > bf_mpdu ) ;
}
INIT_LIST_HEAD ( & sc - > rx . rxbuf ) ;
kfree ( sc - > rx . rx_bufptr ) ;
sc - > rx . rx_bufptr = NULL ;
}
static void ath_rx_edma_init_queue ( struct ath_rx_edma * rx_edma , int size )
{
skb_queue_head_init ( & rx_edma - > rx_fifo ) ;
skb_queue_head_init ( & rx_edma - > rx_buffers ) ;
rx_edma - > rx_fifo_hwsize = size ;
}
static int ath_rx_edma_init ( struct ath_softc * sc , int nbufs )
{
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
struct ath_hw * ah = sc - > sc_ah ;
struct sk_buff * skb ;
struct ath_buf * bf ;
int error = 0 , i ;
u32 size ;
common - > rx_bufsize = roundup ( IEEE80211_MAX_MPDU_LEN +
ah - > caps . rx_status_len ,
min ( common - > cachelsz , ( u16 ) 64 ) ) ;
ath9k_hw_set_rx_bufsize ( ah , common - > rx_bufsize -
ah - > caps . rx_status_len ) ;
ath_rx_edma_init_queue ( & sc - > rx . rx_edma [ ATH9K_RX_QUEUE_LP ] ,
ah - > caps . rx_lp_qdepth ) ;
ath_rx_edma_init_queue ( & sc - > rx . rx_edma [ ATH9K_RX_QUEUE_HP ] ,
ah - > caps . rx_hp_qdepth ) ;
size = sizeof ( struct ath_buf ) * nbufs ;
bf = kzalloc ( size , GFP_KERNEL ) ;
if ( ! bf )
return - ENOMEM ;
INIT_LIST_HEAD ( & sc - > rx . rxbuf ) ;
sc - > rx . rx_bufptr = bf ;
for ( i = 0 ; i < nbufs ; i + + , bf + + ) {
2009-11-04 09:11:34 -08:00
skb = ath_rxbuf_alloc ( common , common - > rx_bufsize , GFP_KERNEL ) ;
2010-04-15 17:38:48 -04:00
if ( ! skb ) {
2009-03-30 15:28:45 +05:30
error = - ENOMEM ;
2010-04-15 17:38:48 -04:00
goto rx_init_fail ;
2008-08-04 00:16:41 -07:00
}
2010-04-15 17:38:48 -04:00
memset ( skb - > data , 0 , common - > rx_bufsize ) ;
2009-03-30 15:28:45 +05:30
bf - > bf_mpdu = skb ;
2010-04-15 17:38:48 -04:00
2009-03-30 15:28:45 +05:30
bf - > bf_buf_addr = dma_map_single ( sc - > dev , skb - > data ,
2009-11-04 09:11:34 -08:00
common - > rx_bufsize ,
2010-04-15 17:38:48 -04:00
DMA_BIDIRECTIONAL ) ;
2009-03-30 15:28:45 +05:30
if ( unlikely ( dma_mapping_error ( sc - > dev ,
2010-04-15 17:38:48 -04:00
bf - > bf_buf_addr ) ) ) {
dev_kfree_skb_any ( skb ) ;
bf - > bf_mpdu = NULL ;
ath_print ( common , ATH_DBG_FATAL ,
" dma_mapping_error() on RX init \n " ) ;
error = - ENOMEM ;
goto rx_init_fail ;
}
list_add_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
}
return 0 ;
rx_init_fail :
ath_rx_edma_cleanup ( sc ) ;
return error ;
}
static void ath_edma_start_recv ( struct ath_softc * sc )
{
spin_lock_bh ( & sc - > rx . rxbuflock ) ;
ath9k_hw_rxena ( sc - > sc_ah ) ;
ath_rx_addbuffer_edma ( sc , ATH9K_RX_QUEUE_HP ,
sc - > rx . rx_edma [ ATH9K_RX_QUEUE_HP ] . rx_fifo_hwsize ) ;
ath_rx_addbuffer_edma ( sc , ATH9K_RX_QUEUE_LP ,
sc - > rx . rx_edma [ ATH9K_RX_QUEUE_LP ] . rx_fifo_hwsize ) ;
spin_unlock_bh ( & sc - > rx . rxbuflock ) ;
ath_opmode_init ( sc ) ;
ath9k_hw_startpcureceive ( sc - > sc_ah ) ;
}
static void ath_edma_stop_recv ( struct ath_softc * sc )
{
spin_lock_bh ( & sc - > rx . rxbuflock ) ;
ath_rx_remove_buffer ( sc , ATH9K_RX_QUEUE_HP ) ;
ath_rx_remove_buffer ( sc , ATH9K_RX_QUEUE_LP ) ;
spin_unlock_bh ( & sc - > rx . rxbuflock ) ;
}
int ath_rx_init ( struct ath_softc * sc , int nbufs )
{
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
struct sk_buff * skb ;
struct ath_buf * bf ;
int error = 0 ;
spin_lock_init ( & sc - > rx . rxflushlock ) ;
sc - > sc_flags & = ~ SC_OP_RXFLUSH ;
spin_lock_init ( & sc - > rx . rxbuflock ) ;
if ( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA ) {
return ath_rx_edma_init ( sc , nbufs ) ;
} else {
common - > rx_bufsize = roundup ( IEEE80211_MAX_MPDU_LEN ,
min ( common - > cachelsz , ( u16 ) 64 ) ) ;
ath_print ( common , ATH_DBG_CONFIG , " cachelsz %u rxbufsize %u \n " ,
common - > cachelsz , common - > rx_bufsize ) ;
/* Initialize rx descriptors */
error = ath_descdma_setup ( sc , & sc - > rx . rxdma , & sc - > rx . rxbuf ,
2010-04-15 17:39:33 -04:00
" rx " , nbufs , 1 , 0 ) ;
2010-04-15 17:38:48 -04:00
if ( error ! = 0 ) {
2009-09-13 02:42:02 -07:00
ath_print ( common , ATH_DBG_FATAL ,
2010-04-15 17:38:48 -04:00
" failed to allocate rx descriptors: %d \n " ,
error ) ;
2009-03-30 15:28:45 +05:30
goto err ;
}
2010-04-15 17:38:48 -04:00
list_for_each_entry ( bf , & sc - > rx . rxbuf , list ) {
skb = ath_rxbuf_alloc ( common , common - > rx_bufsize ,
GFP_KERNEL ) ;
if ( skb = = NULL ) {
error = - ENOMEM ;
goto err ;
}
bf - > bf_mpdu = skb ;
bf - > bf_buf_addr = dma_map_single ( sc - > dev , skb - > data ,
common - > rx_bufsize ,
DMA_FROM_DEVICE ) ;
if ( unlikely ( dma_mapping_error ( sc - > dev ,
bf - > bf_buf_addr ) ) ) {
dev_kfree_skb_any ( skb ) ;
bf - > bf_mpdu = NULL ;
ath_print ( common , ATH_DBG_FATAL ,
" dma_mapping_error() on RX init \n " ) ;
error = - ENOMEM ;
goto err ;
}
bf - > bf_dmacontext = bf - > bf_buf_addr ;
}
sc - > rx . rxlink = NULL ;
2009-03-30 15:28:45 +05:30
}
2008-08-04 00:16:41 -07:00
2009-03-30 15:28:45 +05:30
err :
2008-08-04 00:16:41 -07:00
if ( error )
ath_rx_cleanup ( sc ) ;
return error ;
}
void ath_rx_cleanup ( struct ath_softc * sc )
{
2009-11-04 09:11:34 -08:00
struct ath_hw * ah = sc - > sc_ah ;
struct ath_common * common = ath9k_hw_common ( ah ) ;
2008-08-04 00:16:41 -07:00
struct sk_buff * skb ;
struct ath_buf * bf ;
2010-04-15 17:38:48 -04:00
if ( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA ) {
ath_rx_edma_cleanup ( sc ) ;
return ;
} else {
list_for_each_entry ( bf , & sc - > rx . rxbuf , list ) {
skb = bf - > bf_mpdu ;
if ( skb ) {
dma_unmap_single ( sc - > dev , bf - > bf_buf_addr ,
common - > rx_bufsize ,
DMA_FROM_DEVICE ) ;
dev_kfree_skb ( skb ) ;
}
2009-03-23 18:25:01 -04:00
}
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
if ( sc - > rx . rxdma . dd_desc_len ! = 0 )
ath_descdma_cleanup ( sc , & sc - > rx . rxdma , & sc - > rx . rxbuf ) ;
}
2008-08-04 00:16:41 -07:00
}
/*
* Calculate the receive filter according to the
* operating mode and state :
*
* o always accept unicast , broadcast , and multicast traffic
* o maintain current state of phy error reception ( the hal
* may enable phy error frames for noise immunity work )
* o probe request frames are accepted only when operating in
* hostap , adhoc , or monitor modes
* o enable promiscuous mode according to the interface state
* o accept beacons :
* - when operating in adhoc mode so the 802.11 layer creates
* node table entries for peers ,
* - when operating in station mode for collecting rssi data when
* the station is otherwise quiet , or
* - when operating as a repeater so we see repeater - sta beacons
* - when scanning
*/
u32 ath_calcrxfilter ( struct ath_softc * sc )
{
# define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
2008-08-11 14:03:13 +05:30
2008-08-04 00:16:41 -07:00
u32 rfilt ;
rfilt = ( ath9k_hw_getrxfilter ( sc - > sc_ah ) & RX_FILTER_PRESERVE )
| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
| ATH9K_RX_FILTER_MCAST ;
/* If not a STA, enable processing of Probe Requests */
2009-02-09 13:27:26 +05:30
if ( sc - > sc_ah - > opmode ! = NL80211_IFTYPE_STATION )
2008-08-04 00:16:41 -07:00
rfilt | = ATH9K_RX_FILTER_PROBEREQ ;
2009-03-10 10:55:50 +02:00
/*
* Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
* mode interface or when in monitor mode . AP mode does not need this
* since it receives all in - BSS frames anyway .
*/
2009-02-09 13:27:26 +05:30
if ( ( ( sc - > sc_ah - > opmode ! = NL80211_IFTYPE_AP ) & &
2008-12-07 21:44:03 +05:30
( sc - > rx . rxfilter & FIF_PROMISC_IN_BSS ) ) | |
2009-03-10 10:55:50 +02:00
( sc - > sc_ah - > opmode = = NL80211_IFTYPE_MONITOR ) )
2008-08-04 00:16:41 -07:00
rfilt | = ATH9K_RX_FILTER_PROM ;
2009-02-04 08:10:22 +05:30
if ( sc - > rx . rxfilter & FIF_CONTROL )
rfilt | = ATH9K_RX_FILTER_CONTROL ;
2009-02-19 15:41:52 +05:30
if ( ( sc - > sc_ah - > opmode = = NL80211_IFTYPE_STATION ) & &
! ( sc - > rx . rxfilter & FIF_BCN_PRBRESP_PROMISC ) )
rfilt | = ATH9K_RX_FILTER_MYBEACON ;
else
2008-08-04 00:16:41 -07:00
rfilt | = ATH9K_RX_FILTER_BEACON ;
2009-09-18 15:06:07 +05:30
if ( ( AR_SREV_9280_10_OR_LATER ( sc - > sc_ah ) | |
AR_SREV_9285_10_OR_LATER ( sc - > sc_ah ) ) & &
( sc - > sc_ah - > opmode = = NL80211_IFTYPE_AP ) & &
( sc - > rx . rxfilter & FIF_PSPOLL ) )
2009-02-19 15:41:52 +05:30
rfilt | = ATH9K_RX_FILTER_PSPOLL ;
2008-11-18 09:05:55 +05:30
2009-09-03 12:08:43 +05:30
if ( conf_is_ht ( & sc - > hw - > conf ) )
rfilt | = ATH9K_RX_FILTER_COMP_BAR ;
2009-08-20 19:12:07 -07:00
if ( sc - > sec_wiphy | | ( sc - > rx . rxfilter & FIF_OTHER_BSS ) ) {
2009-03-03 19:23:30 +02:00
/* TODO: only needed if more than one BSSID is in use in
* station / adhoc mode */
2009-08-20 19:12:07 -07:00
/* The following may also be needed for other older chips */
if ( sc - > sc_ah - > hw_version . macVersion = = AR_SREV_VERSION_9160 )
rfilt | = ATH9K_RX_FILTER_PROM ;
2009-03-03 19:23:30 +02:00
rfilt | = ATH9K_RX_FILTER_MCAST_BCAST_ALL ;
}
2008-08-04 00:16:41 -07:00
return rfilt ;
2008-08-11 14:03:13 +05:30
2008-08-04 00:16:41 -07:00
# undef RX_FILTER_PRESERVE
}
int ath_startrecv ( struct ath_softc * sc )
{
2009-02-09 13:27:12 +05:30
struct ath_hw * ah = sc - > sc_ah ;
2008-08-04 00:16:41 -07:00
struct ath_buf * bf , * tbf ;
2010-04-15 17:38:48 -04:00
if ( ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA ) {
ath_edma_start_recv ( sc ) ;
return 0 ;
}
2008-12-07 21:44:03 +05:30
spin_lock_bh ( & sc - > rx . rxbuflock ) ;
if ( list_empty ( & sc - > rx . rxbuf ) )
2008-08-04 00:16:41 -07:00
goto start_recv ;
2008-12-07 21:44:03 +05:30
sc - > rx . rxlink = NULL ;
list_for_each_entry_safe ( bf , tbf , & sc - > rx . rxbuf , list ) {
2008-08-04 00:16:41 -07:00
ath_rx_buf_link ( sc , bf ) ;
}
/* We could have deleted elements so the list may be empty now */
2008-12-07 21:44:03 +05:30
if ( list_empty ( & sc - > rx . rxbuf ) )
2008-08-04 00:16:41 -07:00
goto start_recv ;
2008-12-07 21:44:03 +05:30
bf = list_first_entry ( & sc - > rx . rxbuf , struct ath_buf , list ) ;
2008-08-04 00:16:41 -07:00
ath9k_hw_putrxbuf ( ah , bf - > bf_daddr ) ;
2008-11-18 09:05:55 +05:30
ath9k_hw_rxena ( ah ) ;
2008-08-04 00:16:41 -07:00
start_recv :
2008-12-07 21:44:03 +05:30
spin_unlock_bh ( & sc - > rx . rxbuflock ) ;
2008-11-18 09:05:55 +05:30
ath_opmode_init ( sc ) ;
ath9k_hw_startpcureceive ( ah ) ;
2008-08-04 00:16:41 -07:00
return 0 ;
}
bool ath_stoprecv ( struct ath_softc * sc )
{
2009-02-09 13:27:12 +05:30
struct ath_hw * ah = sc - > sc_ah ;
2008-08-04 00:16:41 -07:00
bool stopped ;
2008-11-18 09:05:55 +05:30
ath9k_hw_stoppcurecv ( ah ) ;
ath9k_hw_setrxfilter ( ah , 0 ) ;
stopped = ath9k_hw_stopdmarecv ( ah ) ;
2010-04-15 17:38:48 -04:00
if ( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA )
ath_edma_stop_recv ( sc ) ;
else
sc - > rx . rxlink = NULL ;
2008-11-18 09:05:55 +05:30
2008-08-04 00:16:41 -07:00
return stopped ;
}
void ath_flushrecv ( struct ath_softc * sc )
{
2008-12-07 21:44:03 +05:30
spin_lock_bh ( & sc - > rx . rxflushlock ) ;
2008-08-11 14:05:46 +05:30
sc - > sc_flags | = SC_OP_RXFLUSH ;
2010-04-15 17:38:48 -04:00
if ( sc - > sc_ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA )
ath_rx_tasklet ( sc , 1 , true ) ;
ath_rx_tasklet ( sc , 1 , false ) ;
2008-08-11 14:05:46 +05:30
sc - > sc_flags & = ~ SC_OP_RXFLUSH ;
2008-12-07 21:44:03 +05:30
spin_unlock_bh ( & sc - > rx . rxflushlock ) ;
2008-08-04 00:16:41 -07:00
}
2009-05-14 21:28:48 +03:00
static bool ath_beacon_dtim_pending_cab ( struct sk_buff * skb )
{
/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
struct ieee80211_mgmt * mgmt ;
u8 * pos , * end , id , elen ;
struct ieee80211_tim_ie * tim ;
mgmt = ( struct ieee80211_mgmt * ) skb - > data ;
pos = mgmt - > u . beacon . variable ;
end = skb - > data + skb - > len ;
while ( pos + 2 < end ) {
id = * pos + + ;
elen = * pos + + ;
if ( pos + elen > end )
break ;
if ( id = = WLAN_EID_TIM ) {
if ( elen < sizeof ( * tim ) )
break ;
tim = ( struct ieee80211_tim_ie * ) pos ;
if ( tim - > dtim_count ! = 0 )
break ;
return tim - > bitmap_ctrl & 0x01 ;
}
pos + = elen ;
}
return false ;
}
static void ath_rx_ps_beacon ( struct ath_softc * sc , struct sk_buff * skb )
{
struct ieee80211_mgmt * mgmt ;
2009-09-10 09:22:37 -07:00
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
2009-05-14 21:28:48 +03:00
if ( skb - > len < 24 + 8 + 2 + 2 )
return ;
mgmt = ( struct ieee80211_mgmt * ) skb - > data ;
2009-09-10 09:22:37 -07:00
if ( memcmp ( common - > curbssid , mgmt - > bssid , ETH_ALEN ) ! = 0 )
2009-05-14 21:28:48 +03:00
return ; /* not from our current AP */
2010-01-08 10:36:05 +05:30
sc - > ps_flags & = ~ PS_WAIT_FOR_BEACON ;
2009-06-19 12:17:48 +02:00
2010-01-08 10:36:05 +05:30
if ( sc - > ps_flags & PS_BEACON_SYNC ) {
sc - > ps_flags & = ~ PS_BEACON_SYNC ;
2009-09-13 02:42:02 -07:00
ath_print ( common , ATH_DBG_PS ,
" Reconfigure Beacon timers based on "
" timestamp from the AP \n " ) ;
2009-05-20 21:59:08 +03:00
ath_beacon_config ( sc , NULL ) ;
}
2009-05-14 21:28:48 +03:00
if ( ath_beacon_dtim_pending_cab ( skb ) ) {
/*
* Remain awake waiting for buffered broadcast / multicast
2009-06-17 20:53:20 +02:00
* frames . If the last broadcast / multicast frame is not
* received properly , the next beacon frame will work as
* a backup trigger for returning into NETWORK SLEEP state ,
* so we are waiting for it as well .
2009-05-14 21:28:48 +03:00
*/
2009-09-13 02:42:02 -07:00
ath_print ( common , ATH_DBG_PS , " Received DTIM beacon indicating "
" buffered broadcast/multicast frame(s) \n " ) ;
2010-01-08 10:36:05 +05:30
sc - > ps_flags | = PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON ;
2009-05-14 21:28:48 +03:00
return ;
}
2010-01-08 10:36:05 +05:30
if ( sc - > ps_flags & PS_WAIT_FOR_CAB ) {
2009-05-14 21:28:48 +03:00
/*
* This can happen if a broadcast frame is dropped or the AP
* fails to send a frame indicating that all CAB frames have
* been delivered .
*/
2010-01-08 10:36:05 +05:30
sc - > ps_flags & = ~ PS_WAIT_FOR_CAB ;
2009-09-13 02:42:02 -07:00
ath_print ( common , ATH_DBG_PS ,
" PS wait for CAB frames timed out \n " ) ;
2009-05-14 21:28:48 +03:00
}
}
static void ath_rx_ps ( struct ath_softc * sc , struct sk_buff * skb )
{
struct ieee80211_hdr * hdr ;
2009-09-13 02:42:02 -07:00
struct ath_common * common = ath9k_hw_common ( sc - > sc_ah ) ;
2009-05-14 21:28:48 +03:00
hdr = ( struct ieee80211_hdr * ) skb - > data ;
/* Process Beacon and CAB receive in PS state */
2010-05-22 23:58:13 -07:00
if ( ( ( sc - > ps_flags & PS_WAIT_FOR_BEACON ) | | ath9k_check_auto_sleep ( sc ) )
& & ieee80211_is_beacon ( hdr - > frame_control ) )
2009-05-14 21:28:48 +03:00
ath_rx_ps_beacon ( sc , skb ) ;
2010-01-08 10:36:05 +05:30
else if ( ( sc - > ps_flags & PS_WAIT_FOR_CAB ) & &
2009-05-14 21:28:48 +03:00
( ieee80211_is_data ( hdr - > frame_control ) | |
ieee80211_is_action ( hdr - > frame_control ) ) & &
is_multicast_ether_addr ( hdr - > addr1 ) & &
! ieee80211_has_moredata ( hdr - > frame_control ) ) {
/*
* No more broadcast / multicast frames to be received at this
* point .
*/
2010-01-08 10:36:05 +05:30
sc - > ps_flags & = ~ PS_WAIT_FOR_CAB ;
2009-09-13 02:42:02 -07:00
ath_print ( common , ATH_DBG_PS ,
" All PS CAB frames received, back to sleep \n " ) ;
2010-01-08 10:36:05 +05:30
} else if ( ( sc - > ps_flags & PS_WAIT_FOR_PSPOLL_DATA ) & &
2009-05-19 17:01:38 +03:00
! is_multicast_ether_addr ( hdr - > addr1 ) & &
! ieee80211_has_morefrags ( hdr - > frame_control ) ) {
2010-01-08 10:36:05 +05:30
sc - > ps_flags & = ~ PS_WAIT_FOR_PSPOLL_DATA ;
2009-09-13 02:42:02 -07:00
ath_print ( common , ATH_DBG_PS ,
" Going back to sleep after having received "
2010-01-29 17:22:12 -05:00
" PS-Poll data (0x%lx) \n " ,
2010-01-08 10:36:05 +05:30
sc - > ps_flags & ( PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK ) ) ;
2009-05-14 21:28:48 +03:00
}
}
2009-11-02 11:36:08 -08:00
static void ath_rx_send_to_mac80211 ( struct ieee80211_hw * hw ,
struct ath_softc * sc , struct sk_buff * skb ,
2009-11-04 08:20:42 -08:00
struct ieee80211_rx_status * rxs )
2009-05-14 21:28:47 +03:00
{
struct ieee80211_hdr * hdr ;
hdr = ( struct ieee80211_hdr * ) skb - > data ;
/* Send the frame to mac80211 */
if ( is_multicast_ether_addr ( hdr - > addr1 ) ) {
int i ;
/*
* Deliver broadcast / multicast frames to all suitable
* virtual wiphys .
*/
/* TODO: filter based on channel configuration */
for ( i = 0 ; i < sc - > num_sec_wiphy ; i + + ) {
struct ath_wiphy * aphy = sc - > sec_wiphy [ i ] ;
struct sk_buff * nskb ;
if ( aphy = = NULL )
continue ;
nskb = skb_copy ( skb , GFP_ATOMIC ) ;
2009-11-04 08:20:42 -08:00
if ( ! nskb )
continue ;
ieee80211_rx ( aphy - > hw , nskb ) ;
2009-05-14 21:28:47 +03:00
}
2009-06-17 13:13:00 +02:00
ieee80211_rx ( sc - > hw , skb ) ;
2009-11-04 08:20:42 -08:00
} else
2009-05-14 21:28:47 +03:00
/* Deliver unicast frames based on receiver address */
2009-11-02 11:36:08 -08:00
ieee80211_rx ( hw , skb ) ;
2009-05-14 21:28:47 +03:00
}
2010-04-15 17:38:48 -04:00
static bool ath_edma_get_buffers ( struct ath_softc * sc ,
enum ath9k_rx_qtype qtype )
2008-08-04 00:16:41 -07:00
{
2010-04-15 17:38:48 -04:00
struct ath_rx_edma * rx_edma = & sc - > rx . rx_edma [ qtype ] ;
struct ath_hw * ah = sc - > sc_ah ;
struct ath_common * common = ath9k_hw_common ( ah ) ;
struct sk_buff * skb ;
struct ath_buf * bf ;
int ret ;
skb = skb_peek ( & rx_edma - > rx_fifo ) ;
if ( ! skb )
return false ;
bf = SKB_CB_ATHBUF ( skb ) ;
BUG_ON ( ! bf ) ;
dma_sync_single_for_device ( sc - > dev , bf - > bf_buf_addr ,
common - > rx_bufsize , DMA_FROM_DEVICE ) ;
ret = ath9k_hw_process_rxdesc_edma ( ah , NULL , skb - > data ) ;
if ( ret = = - EINPROGRESS )
return false ;
__skb_unlink ( skb , & rx_edma - > rx_fifo ) ;
if ( ret = = - EINVAL ) {
/* corrupt descriptor, skip this one and the following one */
list_add_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
ath_rx_edma_buf_link ( sc , qtype ) ;
skb = skb_peek ( & rx_edma - > rx_fifo ) ;
if ( ! skb )
return true ;
bf = SKB_CB_ATHBUF ( skb ) ;
BUG_ON ( ! bf ) ;
__skb_unlink ( skb , & rx_edma - > rx_fifo ) ;
list_add_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
ath_rx_edma_buf_link ( sc , qtype ) ;
2010-05-10 19:41:34 -07:00
return true ;
2010-04-15 17:38:48 -04:00
}
skb_queue_tail ( & rx_edma - > rx_buffers , skb ) ;
return true ;
}
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
static struct ath_buf * ath_edma_get_next_rx_buf ( struct ath_softc * sc ,
struct ath_rx_status * rs ,
enum ath9k_rx_qtype qtype )
{
struct ath_rx_edma * rx_edma = & sc - > rx . rx_edma [ qtype ] ;
struct sk_buff * skb ;
2008-11-18 09:05:55 +05:30
struct ath_buf * bf ;
2010-04-15 17:38:48 -04:00
while ( ath_edma_get_buffers ( sc , qtype ) ) ;
skb = __skb_dequeue ( & rx_edma - > rx_buffers ) ;
if ( ! skb )
return NULL ;
bf = SKB_CB_ATHBUF ( skb ) ;
ath9k_hw_process_rxdesc_edma ( sc - > sc_ah , rs , skb - > data ) ;
return bf ;
}
static struct ath_buf * ath_get_next_rx_buf ( struct ath_softc * sc ,
struct ath_rx_status * rs )
{
struct ath_hw * ah = sc - > sc_ah ;
struct ath_common * common = ath9k_hw_common ( ah ) ;
2008-08-04 00:16:41 -07:00
struct ath_desc * ds ;
2010-04-15 17:38:48 -04:00
struct ath_buf * bf ;
int ret ;
if ( list_empty ( & sc - > rx . rxbuf ) ) {
sc - > rx . rxlink = NULL ;
return NULL ;
}
bf = list_first_entry ( & sc - > rx . rxbuf , struct ath_buf , list ) ;
ds = bf - > bf_desc ;
/*
* Must provide the virtual address of the current
* descriptor , the physical address , and the virtual
* address of the next descriptor in the h / w chain .
* This allows the HAL to look ahead to see if the
* hardware is done with a descriptor by checking the
* done bit in the following descriptor and the address
* of the current descriptor the DMA engine is working
* on . All this is necessary because of our use of
* a self - linked list to avoid rx overruns .
*/
ret = ath9k_hw_rxprocdesc ( ah , ds , rs , 0 ) ;
if ( ret = = - EINPROGRESS ) {
struct ath_rx_status trs ;
struct ath_buf * tbf ;
struct ath_desc * tds ;
memset ( & trs , 0 , sizeof ( trs ) ) ;
if ( list_is_last ( & bf - > list , & sc - > rx . rxbuf ) ) {
sc - > rx . rxlink = NULL ;
return NULL ;
}
tbf = list_entry ( bf - > list . next , struct ath_buf , list ) ;
/*
* On some hardware the descriptor status words could
* get corrupted , including the done bit . Because of
* this , check if the next descriptor ' s done bit is
* set or not .
*
* If the next descriptor ' s done bit is set , the current
* descriptor has been corrupted . Force s / w to discard
* this descriptor and continue . . .
*/
tds = tbf - > bf_desc ;
ret = ath9k_hw_rxprocdesc ( ah , tds , & trs , 0 ) ;
if ( ret = = - EINPROGRESS )
return NULL ;
}
if ( ! bf - > bf_mpdu )
return bf ;
/*
* Synchronize the DMA transfer with CPU before
* 1. accessing the frame
* 2. requeueing the same buffer to h / w
*/
dma_sync_single_for_device ( sc - > dev , bf - > bf_buf_addr ,
common - > rx_bufsize ,
DMA_FROM_DEVICE ) ;
return bf ;
}
2010-05-20 15:34:38 +05:30
/* Assumes you've already done the endian to CPU conversion */
/*
 * Decide whether a received frame should be accepted.  Returns false
 * for frames the hardware flagged as hopelessly bad; decrypt/MIC
 * failures are accepted but reported via *decrypt_error / rxs->flag
 * so mac80211 can keep its statistics.
 */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/*
	 * rs_more indicates chained descriptors which can be used
	 * to link buffers together for a sort of scatter-gather
	 * operation.
	 * reject the frame, we don't support scatter-gather yet and
	 * the frame is probably corrupt anyway
	 */
	if (rx_stats->rs_more)
		return false;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
			else
				rxs->flag |= RX_FLAG_MMIC_ERROR;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
static int ath9k_process_rate ( struct ath_common * common ,
struct ieee80211_hw * hw ,
struct ath_rx_status * rx_stats ,
2010-05-20 14:34:46 -07:00
struct ieee80211_rx_status * rxs )
2010-05-20 15:34:38 +05:30
{
struct ieee80211_supported_band * sband ;
enum ieee80211_band band ;
unsigned int i = 0 ;
band = hw - > conf . channel - > band ;
sband = hw - > wiphy - > bands [ band ] ;
if ( rx_stats - > rs_rate & 0x80 ) {
/* HT rate */
rxs - > flag | = RX_FLAG_HT ;
if ( rx_stats - > rs_flags & ATH9K_RX_2040 )
rxs - > flag | = RX_FLAG_40MHZ ;
if ( rx_stats - > rs_flags & ATH9K_RX_GI )
rxs - > flag | = RX_FLAG_SHORT_GI ;
rxs - > rate_idx = rx_stats - > rs_rate & 0x7f ;
return 0 ;
}
for ( i = 0 ; i < sband - > n_bitrates ; i + + ) {
if ( sband - > bitrates [ i ] . hw_value = = rx_stats - > rs_rate ) {
rxs - > rate_idx = i ;
return 0 ;
}
if ( sband - > bitrates [ i ] . hw_value_short = = rx_stats - > rs_rate ) {
rxs - > flag | = RX_FLAG_SHORTPRE ;
rxs - > rate_idx = i ;
return 0 ;
}
}
/*
* No valid hardware bitrate found - - we should not get here
* because hardware has already validated this frame as OK .
*/
ath_print ( common , ATH_DBG_XMIT , " unsupported hw bitrate detected "
" 0x%02x using 1 Mbit \n " , rx_stats - > rs_rate ) ;
return - EINVAL ;
}
static void ath9k_process_rssi ( struct ath_common * common ,
struct ieee80211_hw * hw ,
2010-05-20 14:34:46 -07:00
struct ieee80211_hdr * hdr ,
2010-05-20 15:34:38 +05:30
struct ath_rx_status * rx_stats )
{
struct ath_hw * ah = common - > ah ;
struct ieee80211_sta * sta ;
struct ath_node * an ;
int last_rssi = ATH_RSSI_DUMMY_MARKER ;
__le16 fc ;
fc = hdr - > frame_control ;
rcu_read_lock ( ) ;
/*
* XXX : use ieee80211_find_sta ! This requires quite a bit of work
* under the current ath9k virtual wiphy implementation as we have
* no way of tying a vif to wiphy . Typically vifs are attached to
* at least one sdata of a wiphy on mac80211 but with ath9k virtual
* wiphy you ' d have to iterate over every wiphy and each sdata .
*/
sta = ieee80211_find_sta_by_hw ( hw , hdr - > addr2 ) ;
if ( sta ) {
an = ( struct ath_node * ) sta - > drv_priv ;
if ( rx_stats - > rs_rssi ! = ATH9K_RSSI_BAD & &
! rx_stats - > rs_moreaggr )
ATH_RSSI_LPF ( an - > last_rssi , rx_stats - > rs_rssi ) ;
last_rssi = an - > last_rssi ;
}
rcu_read_unlock ( ) ;
if ( likely ( last_rssi ! = ATH_RSSI_DUMMY_MARKER ) )
rx_stats - > rs_rssi = ATH_EP_RND ( last_rssi ,
ATH_RSSI_EP_MULTIPLIER ) ;
if ( rx_stats - > rs_rssi < 0 )
rx_stats - > rs_rssi = 0 ;
/* Update Beacon RSSI, this is used by ANI. */
if ( ieee80211_is_beacon ( fc ) )
ah - > stats . avgbrssi = rx_stats - > rs_rssi ;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/* Start from a clean mac80211 RX status for this frame. */
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	/* Fill in the remaining mac80211 RX status fields. */
	rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return 0;
}
/*
 * Final per-frame fixups before handing the skb to mac80211: strip any
 * alignment padding the hardware inserted after the 802.11 header, and
 * set RX_FLAG_DECRYPTED when the hardware already decrypted the frame.
 */
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;
	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		/* Hardware reported the key slot it decrypted with. */
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc) &&
		   !decrypt_error && skb->len >= hdrlen + 4) {
		/* Fall back to the key id in the IV of the frame itself. */
		keyix = skb->data[hdrlen + 3] >> 6;
		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}

	/* Use software decrypt for management frames. */
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
2010-04-15 17:38:48 -04:00
int ath_rx_tasklet ( struct ath_softc * sc , int flush , bool hp )
{
struct ath_buf * bf ;
2008-11-21 17:41:33 -08:00
struct sk_buff * skb = NULL , * requeue_skb ;
2009-11-04 08:20:42 -08:00
struct ieee80211_rx_status * rxs ;
2009-02-09 13:27:12 +05:30
struct ath_hw * ah = sc - > sc_ah ;
2009-09-10 11:08:14 -07:00
struct ath_common * common = ath9k_hw_common ( ah ) ;
2009-11-02 11:36:08 -08:00
/*
* The hw can techncically differ from common - > hw when using ath9k
* virtual wiphy so to account for that we iterate over the active
* wiphys and find the appropriate wiphy and therefore hw .
*/
struct ieee80211_hw * hw = NULL ;
2008-11-18 09:05:55 +05:30
struct ieee80211_hdr * hdr ;
2009-11-04 16:47:22 -08:00
int retval ;
2008-11-18 09:05:55 +05:30
bool decrypt_error = false ;
2010-03-29 20:14:23 -07:00
struct ath_rx_status rs ;
2010-04-15 17:38:48 -04:00
enum ath9k_rx_qtype qtype ;
bool edma = ! ! ( ah - > caps . hw_caps & ATH9K_HW_CAP_EDMA ) ;
int dma_type ;
2010-05-20 14:34:47 -07:00
u8 rx_status_len = ah - > caps . rx_status_len ;
2008-11-18 09:05:55 +05:30
2010-04-15 17:38:48 -04:00
if ( edma )
dma_type = DMA_BIDIRECTIONAL ;
2010-05-14 21:15:38 +08:00
else
dma_type = DMA_FROM_DEVICE ;
2010-04-15 17:38:48 -04:00
qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP ;
2008-12-07 21:44:03 +05:30
spin_lock_bh ( & sc - > rx . rxbuflock ) ;
2008-08-04 00:16:41 -07:00
do {
/* If handling rx interrupt and flush is in progress => exit */
2008-08-11 14:05:46 +05:30
if ( ( sc - > sc_flags & SC_OP_RXFLUSH ) & & ( flush = = 0 ) )
2008-08-04 00:16:41 -07:00
break ;
2010-03-29 20:14:23 -07:00
memset ( & rs , 0 , sizeof ( rs ) ) ;
2010-04-15 17:38:48 -04:00
if ( edma )
bf = ath_edma_get_next_rx_buf ( sc , & rs , qtype ) ;
else
bf = ath_get_next_rx_buf ( sc , & rs ) ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
if ( ! bf )
break ;
2008-08-04 00:16:41 -07:00
skb = bf - > bf_mpdu ;
2008-11-18 09:05:55 +05:30
if ( ! skb )
2008-08-04 00:16:41 -07:00
continue ;
2010-05-20 14:34:47 -07:00
hdr = ( struct ieee80211_hdr * ) ( skb - > data + rx_status_len ) ;
2009-11-04 08:20:42 -08:00
rxs = IEEE80211_SKB_RXCB ( skb ) ;
2009-11-02 11:36:08 -08:00
hw = ath_get_virt_hw ( sc , hdr ) ;
2010-03-29 20:14:23 -07:00
ath_debug_stat_rx ( sc , & rs ) ;
2010-01-08 10:36:11 +05:30
2008-08-04 00:16:41 -07:00
/*
2008-11-18 09:05:55 +05:30
* If we ' re asked to flush receive queue , directly
* chain it back at the queue without processing it .
2008-08-04 00:16:41 -07:00
*/
2008-11-18 09:05:55 +05:30
if ( flush )
2008-11-21 17:41:33 -08:00
goto requeue ;
2008-08-04 00:16:41 -07:00
2010-05-20 14:34:46 -07:00
retval = ath9k_rx_skb_preprocess ( common , hw , hdr , & rs ,
2010-05-20 15:34:38 +05:30
rxs , & decrypt_error ) ;
2009-11-04 16:34:33 -08:00
if ( retval )
2008-11-21 17:41:33 -08:00
goto requeue ;
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer ' s skb */
2009-11-04 09:11:34 -08:00
requeue_skb = ath_rxbuf_alloc ( common , common - > rx_bufsize , GFP_ATOMIC ) ;
2008-11-21 17:41:33 -08:00
/* If there is no memory we ignore the current RX'd frame,
* tell hardware it can give us a new frame using the old
2008-12-07 21:44:03 +05:30
* skb and put it at the tail of the sc - > rx . rxbuf list for
2008-11-21 17:41:33 -08:00
* processing . */
if ( ! requeue_skb )
goto requeue ;
2008-08-04 00:16:41 -07:00
2008-12-15 20:40:46 +05:30
/* Unmap the frame */
2009-01-14 20:17:03 +01:00
dma_unmap_single ( sc - > dev , bf - > bf_buf_addr ,
2009-11-04 09:11:34 -08:00
common - > rx_bufsize ,
2010-04-15 17:38:48 -04:00
dma_type ) ;
2008-08-04 00:16:41 -07:00
2010-04-15 17:38:48 -04:00
skb_put ( skb , rs . rs_datalen + ah - > caps . rx_status_len ) ;
if ( ah - > caps . rx_status_len )
skb_pull ( skb , ah - > caps . rx_status_len ) ;
2008-11-18 09:05:55 +05:30
2010-05-20 15:34:38 +05:30
ath9k_rx_skb_postprocess ( common , skb , & rs ,
rxs , decrypt_error ) ;
2008-11-18 09:05:55 +05:30
2008-11-21 17:41:33 -08:00
/* We will now give hardware our shiny new allocated skb */
bf - > bf_mpdu = requeue_skb ;
2009-01-14 20:17:03 +01:00
bf - > bf_buf_addr = dma_map_single ( sc - > dev , requeue_skb - > data ,
2009-11-04 09:11:34 -08:00
common - > rx_bufsize ,
2010-04-15 17:38:48 -04:00
dma_type ) ;
2009-01-14 20:17:03 +01:00
if ( unlikely ( dma_mapping_error ( sc - > dev ,
2008-12-03 03:35:29 -08:00
bf - > bf_buf_addr ) ) ) {
dev_kfree_skb_any ( requeue_skb ) ;
bf - > bf_mpdu = NULL ;
2009-09-13 02:42:02 -07:00
ath_print ( common , ATH_DBG_FATAL ,
" dma_mapping_error() on RX \n " ) ;
2009-11-04 08:20:42 -08:00
ath_rx_send_to_mac80211 ( hw , sc , skb , rxs ) ;
2008-12-03 03:35:29 -08:00
break ;
}
2008-11-21 17:41:33 -08:00
bf - > bf_dmacontext = bf - > bf_buf_addr ;
2008-08-04 00:16:41 -07:00
/*
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row .
*/
2010-03-29 20:14:23 -07:00
if ( sc - > rx . defant ! = rs . rs_antenna ) {
2008-12-07 21:44:03 +05:30
if ( + + sc - > rx . rxotherant > = 3 )
2010-03-29 20:14:23 -07:00
ath_setdefantenna ( sc , rs . rs_antenna ) ;
2008-08-04 00:16:41 -07:00
} else {
2008-12-07 21:44:03 +05:30
sc - > rx . rxotherant = 0 ;
2008-08-04 00:16:41 -07:00
}
2009-01-20 11:17:08 +05:30
2010-05-22 23:58:13 -07:00
if ( unlikely ( ath9k_check_auto_sleep ( sc ) | |
( sc - > ps_flags & ( PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA ) ) ) )
2009-05-14 21:28:48 +03:00
ath_rx_ps ( sc , skb ) ;
2009-11-04 08:20:42 -08:00
ath_rx_send_to_mac80211 ( hw , sc , skb , rxs ) ;
2009-05-14 21:28:48 +03:00
2008-11-21 17:41:33 -08:00
requeue :
2010-04-15 17:38:48 -04:00
if ( edma ) {
list_add_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
ath_rx_edma_buf_link ( sc , qtype ) ;
} else {
list_move_tail ( & bf - > list , & sc - > rx . rxbuf ) ;
ath_rx_buf_link ( sc , bf ) ;
}
2008-11-18 09:05:55 +05:30
} while ( 1 ) ;
2008-12-07 21:44:03 +05:30
spin_unlock_bh ( & sc - > rx . rxbuflock ) ;
2008-08-04 00:16:41 -07:00
return 0 ;
}