/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "core.h"

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* Setup rx descriptors. The sc_rxbufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process. */
	ath9k_hw_setuprxdesc(ah, ds,
			     sc->sc_rxbufsize,
			     0);

	if (sc->sc_rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->sc_rxlink = bf->bf_daddr;

	sc->sc_rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}

/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
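	/* If the low 15 bits of the current TSF are smaller than the rx
	 * timestamp, the 15-bit counter wrapped after the frame was
	 * received; step back one period (0x8000) before splicing the
	 * descriptor timestamp into the full TSF. */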
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}

static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */

	/* Note: the kernel can allocate a buffer greater than
	 * what we ask it to give us. We really only need 4 KB as that
	 * is what this hardware supports, and in fact we need at least
	 * 3849 bytes as that is the max AMSDU size this hardware supports.
	 * Unfortunately this means we may get 8 KB here from the
	 * kernel... and that is actually what is observed on some
	 * systems :( */
	skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->sc_cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->sc_cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"skbuff alloc of size %u failed\n", len);
		return NULL;
	}

	return skb;
}

static int ath_rate2idx(struct ath_softc *sc, int rate)
{
	int i = 0, cur_band, n_rates;
	struct ieee80211_hw *hw = sc->hw;

	cur_band = hw->conf.channel->band;
	n_rates = sc->sbands[cur_band].n_bitrates;

	for (i = 0; i < n_rates; i++) {
		if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
			break;
	}

	/*
	 * NB: mac80211 validates the rx rate index against the supported
	 * legacy rate indices only (it should also be done against HT rates).
	 * Return the highest legacy rate index for an rx rate that does not
	 * match any of the supported basic and extended rates, to keep
	 * mac80211 happy. The following hack will be cleaned up once the
	 * issue with the rx rate index validation in mac80211 is fixed.
	 */
	if (i == n_rates)
		return n_rates - 1;

	return i;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it
 * no decryption key or a real decryption error. This lets us keep
 * statistics there.
 */
static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
			  struct ieee80211_rx_status *rx_status,
			  bool *decrypt_error, struct ath_softc *sc)
{
	struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
	struct ieee80211_hdr *hdr;
	int ratekbps, rix;
	u8 ratecode;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	if (ds->ds_rxstat.rs_more) {
		/*
		 * Frame spans multiple descriptors; this cannot happen yet
		 * as we don't support jumbograms. If not in monitor mode,
		 * discard the frame. Enable this if you want to see
		 * error frames in Monitor mode.
		 */
		if (sc->sc_ah->ah_opmode != NL80211_IFTYPE_MONITOR)
			goto rx_next;
	} else if (ds->ds_rxstat.rs_status != 0) {
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
			goto rx_next;

		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
			else
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR) {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				goto rx_next;
		} else {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				goto rx_next;
			}
		}
	}

	ratecode = ds->ds_rxstat.rs_rate;
	rix = rate_table->rateCodeToIndex[ratecode];
	ratekbps = rate_table->info[rix].ratekbps;

	/* HT rate */
	if (ratecode & 0x80) {
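		/* 40 MHz operation carries 108 data subcarriers vs. 52 in
		 * 20 MHz, hence the 27/13 scaling; short GI shortens the
		 * OFDM symbol from 4.0 us to 3.6 us, hence 10/9. */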
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
			ratekbps = (ratekbps * 27) / 13;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
			ratekbps = (ratekbps * 10) / 9;
	}

	rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
	rx_status->band = sc->hw->conf.channel->band;
	rx_status->freq = sc->hw->conf.channel->center_freq;
	rx_status->noise = sc->sc_ani.sc_noise_floor;
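	/* rs_rssi is reported relative to the noise floor, so add the
	 * current ANI noise-floor estimate to get an absolute signal level. */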
	rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
	rx_status->rate_idx = ath_rate2idx(sc, (ratekbps / 100));
	rx_status->antenna = ds->ds_rxstat.rs_antenna;

	/* At 45 you will be able to use MCS 15 reliably. A more elaborate
	 * scheme can be used here but it requires tables of SNR/throughput
	 * for each possible mode used. */
	rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;

	/* rssi can be more than 45 though, anything above that
	 * should be considered 100% */
	if (rx_status->qual > 100)
		rx_status->qual = 100;

	rx_status->flag |= RX_FLAG_TSFT;

	return 1;
rx_next:
	return 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_myaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->sc_rxflushlock);
		sc->sc_flags &= ~SC_OP_RXFLUSH;
		spin_lock_init(&sc->sc_rxbuflock);
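
		/* Round the rx buffer size up to a multiple of the cache
		 * line size (capped at 64 bytes) so DMA mappings stay
		 * cache-line aligned. */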
		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->sc_cachelsz,
					       (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			sc->sc_cachelsz, sc->sc_rxbufsize);

		/* Initialize rx descriptors */
		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"failed to allocate rx descriptors: %d\n",
				error);
			break;
		}

		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
							 sc->sc_rxbufsize,
							 PCI_DMA_FROMDEVICE);
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->sc_rxlink = NULL;

	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->ah_opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscuous mode */
	if (((sc->sc_ah->ah_opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR)) {
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent the hardware from sending ACKs */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

	if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION ||
	    sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC)
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, we want to enable reception of PSPOLL
	   and beacon frames */
	if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP)
		rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->sc_rxbuflock);
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	sc->sc_rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->sc_rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->sc_rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);
	mdelay(3); /* 3ms is long enough for 1 frame */
	sc->sc_rxlink = NULL;

	return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->sc_rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	ath_rx_tasklet(sc, 1);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->sc_rxflushlock);
}

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
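/* PA2DESC() maps a descriptor's DMA (bus) address back to its kernel virtual
 * address by offsetting into the contiguous descriptor block set up by
 * ath_descdma_setup(). */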

	struct ath_buf *bf;
	struct ath_desc *ds;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status rx_status;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padsize, retval;
	bool decrypt_error = false;
	u8 keyix;

	spin_lock_bh(&sc->sc_rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		if (list_empty(&sc->sc_rxbuf)) {
			sc->sc_rxlink = NULL;
			break;
		}

		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
		ds = bf->bf_desc;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah, ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
				sc->sc_rxlink = NULL;
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */
			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link),
						     0);
			if (retval == -EINPROGRESS) {
				break;
			}
		}

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * If we're asked to flush the receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		if (!ds->ds_rxstat.rs_datalen)
			goto requeue;

		/* The status portion of the descriptor could get corrupted. */
		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
			goto requeue;

		if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->sc_rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		pci_dma_sync_single_for_cpu(sc->pdev,
					    bf->bf_buf_addr,
					    sc->sc_rxbufsize,
					    PCI_DMA_FROMDEVICE);
		pci_unmap_single(sc->pdev, bf->bf_buf_addr,
				 sc->sc_rxbufsize,
				 PCI_DMA_FROMDEVICE);

		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);

		/* see if any padding is done by the hw and remove it */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
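		/* The hardware pads the 802.11 header out to a 4-byte boundary
		 * so the payload starts aligned; shift the header forward over
		 * the pad and pull it off before handing the frame up. */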
		if (hdrlen & 3) {
			padsize = hdrlen % 4;
			memmove(skb->data + padsize, skb->data, hdrlen);
			skb_pull(skb, padsize);
		}

		keyix = ds->ds_rxstat.rs_keyix;
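
		/* A valid hardware key index with no decrypt error means the
		 * frame was decrypted in hardware. Otherwise, for protected
		 * frames, recover the key ID from the top two bits of the
		 * fourth IV byte and check whether that key slot is set up. */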
		if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
			rx_status.flag |= RX_FLAG_DECRYPTED;
		} else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
			   && !decrypt_error && skb->len >= hdrlen + 4) {
			keyix = skb->data[hdrlen + 3] >> 6;
			if (test_bit(keyix, sc->sc_keymap))
				rx_status.flag |= RX_FLAG_DECRYPTED;
		}

		/* Send the frame to mac80211 */
		__ieee80211_rx(sc->hw, skb, &rx_status);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data,
						 sc->sc_rxbufsize,
						 PCI_DMA_FROMDEVICE);
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
		} else {
			sc->sc_rxotherant = 0;
		}
requeue:
		list_move_tail(&bf->list, &sc->sc_rxbuf);
		ath_rx_buf_link(sc, bf);
	} while (1);

	spin_unlock_bh(&sc->sc_rxbuflock);

	return 0;
#undef PA2DESC
}