/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
                                        struct ath9k_tx_queue_info *qi)
{
        ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
                "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
                ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
                ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
                ah->txurn_interrupt_mask);

        ENABLE_REGWRITE_BUFFER(ah);

        REG_WRITE(ah, AR_IMR_S0,
                  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
                  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
        REG_WRITE(ah, AR_IMR_S1,
                  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
                  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

        ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
        ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
        REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

        REGWRITE_BUFFER_FLUSH(ah);
}
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
        return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
        REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
        ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
                "Enable TXE on queue: %u\n", q);
        REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
{
        struct ar5416_desc *ads = AR5416DESC(ds);

        ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
        ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
        ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
        ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
        ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
        u32 npend;

        npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
        if (npend == 0) {
                if (REG_READ(ah, AR_Q_TXE) & (1 << q))
                        npend = 1;
        }

        return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);
/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Care must be taken to set the frame trigger level based on the DMA
 * request size. For example, if the DMA request size is set to 128 bytes,
 * the trigger level cannot exceed 6 * 64 = 384 bytes: there needs to be
 * enough space in the TX FIFO for the requested transfer size, so the TX
 * FIFO stops at 512 - 128 = 384 bytes. If we set the threshold to a value
 * beyond 6, the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB; however,
 * there is a hardware issue which forces us to use 2 KB instead, so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
        u32 txcfg, curLevel, newLevel;

        if (ah->tx_trig_level >= ah->config.max_txtrig_level)
                return false;

        ath9k_hw_disable_interrupts(ah);

        txcfg = REG_READ(ah, AR_TXCFG);
        curLevel = MS(txcfg, AR_FTRIG);
        newLevel = curLevel;
        if (bIncTrigLevel) {
                if (curLevel < ah->config.max_txtrig_level)
                        newLevel++;
        } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
                newLevel--;
        if (newLevel != curLevel)
                REG_WRITE(ah, AR_TXCFG,
                          (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

        ath9k_hw_enable_interrupts(ah);

        ah->tx_trig_level = newLevel;

        return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
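
/*
 * Example (illustrative sketch, not part of this file): a caller servicing
 * TX underrun interrupts would typically bump the trigger level until the
 * underruns stop, e.g. in its interrupt path:
 *
 *      if (status & ATH9K_INT_TXURN)
 *              ath9k_hw_updatetxtriglevel(ah, true);
 *
 * Passing true asks for an increase; the function clamps the level to
 * ah->config.max_txtrig_level and returns whether the level changed.
 */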
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT       4000    /* usec */
#define ATH9K_TIME_QUANTUM              100     /* usec */
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;
        u32 tsfLow, j, wait;
        u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

        if (q >= pCap->total_queues) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Stopping TX DMA, invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Stopping TX DMA, inactive queue: %u\n", q);
                return false;
        }

        REG_WRITE(ah, AR_Q_TXD, 1 << q);

        for (wait = wait_time; wait != 0; wait--) {
                if (ath9k_hw_numtxpending(ah, q) == 0)
                        break;
                udelay(ATH9K_TIME_QUANTUM);
        }

        if (ath9k_hw_numtxpending(ah, q)) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "%s: Num of pending TX Frames %d on Q %d\n",
                        __func__, ath9k_hw_numtxpending(ah, q), q);

                for (j = 0; j < 2; j++) {
                        tsfLow = REG_READ(ah, AR_TSF_L32);
                        REG_WRITE(ah, AR_QUIET2,
                                  SM(10, AR_QUIET2_QUIET_DUR));
                        REG_WRITE(ah, AR_QUIET_PERIOD, 100);
                        REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
                        REG_SET_BIT(ah, AR_TIMER_MODE,
                                    AR_QUIET_TIMER_EN);

                        if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
                                break;

                        ath_dbg(common, ATH_DBG_QUEUE,
                                "TSF has moved while trying to set quiet time TSF: 0x%08x\n",
                                tsfLow);
                }

                REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

                udelay(200);
                REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

                wait = wait_time;
                while (ath9k_hw_numtxpending(ah, q)) {
                        if ((--wait) == 0) {
                                ath_err(common,
                                        "Failed to stop TX DMA in 100 msec after killing last frame\n");
                                break;
                        }
                        udelay(ATH9K_TIME_QUANTUM);
                }

                REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
        }

        REG_WRITE(ah, AR_Q_TXD, 0);
        return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);
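
/*
 * Example (illustrative sketch, assuming a caller that walks its hardware
 * queues): draining all active TX queues before a reset could look roughly
 * like this, pairing ath9k_hw_stoptxdma() with ath9k_hw_numtxpending():
 *
 *      for (q = 0; q < ah->caps.total_queues; q++) {
 *              if (ah->txq[q].tqi_type == ATH9K_TX_QUEUE_INACTIVE)
 *                      continue;
 *              if (!ath9k_hw_stoptxdma(ah, q))
 *                      npend += ath9k_hw_numtxpending(ah, q);
 *      }
 */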
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
        *txqs &= ah->intr_txqs;
        ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
                            const struct ath9k_tx_queue_info *qinfo)
{
        u32 cw;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;

        if (q >= pCap->total_queues) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Set TXQ properties, invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Set TXQ properties, inactive queue: %u\n", q);
                return false;
        }

        ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

        qi->tqi_ver = qinfo->tqi_ver;
        qi->tqi_subtype = qinfo->tqi_subtype;
        qi->tqi_qflags = qinfo->tqi_qflags;
        qi->tqi_priority = qinfo->tqi_priority;

        if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
                qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
        else
                qi->tqi_aifs = INIT_AIFS;

        if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmin, 1024U);
                qi->tqi_cwmin = 1;
                while (qi->tqi_cwmin < cw)
                        qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
        } else
                qi->tqi_cwmin = qinfo->tqi_cwmin;

        if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
                cw = min(qinfo->tqi_cwmax, 1024U);
                qi->tqi_cwmax = 1;
                while (qi->tqi_cwmax < cw)
                        qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
        } else
                qi->tqi_cwmax = INIT_CWMAX;

        if (qinfo->tqi_shretry != 0)
                qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
        else
                qi->tqi_shretry = INIT_SH_RETRY;

        if (qinfo->tqi_lgretry != 0)
                qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
        else
                qi->tqi_lgretry = INIT_LG_RETRY;

        qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
        qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
        qi->tqi_burstTime = qinfo->tqi_burstTime;
        qi->tqi_readyTime = qinfo->tqi_readyTime;

        switch (qinfo->tqi_subtype) {
        case ATH9K_WME_UPSD:
                if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
                        qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
                break;
        default:
                break;
        }

        return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
bool ath9k_hw_get_txq_props ( struct ath_hw * ah , int q ,
2008-10-29 07:46:30 +03:00
struct ath9k_tx_queue_info * qinfo )
{
2009-09-13 13:42:02 +04:00
struct ath_common * common = ath9k_hw_common ( ah ) ;
2009-02-09 10:57:26 +03:00
struct ath9k_hw_capabilities * pCap = & ah - > caps ;
2008-10-29 07:46:30 +03:00
struct ath9k_tx_queue_info * qi ;
if ( q > = pCap - > total_queues ) {
2010-12-03 06:12:37 +03:00
ath_dbg ( common , ATH_DBG_QUEUE ,
" Get TXQ properties, invalid queue: %u \n " , q ) ;
2008-10-29 07:46:30 +03:00
return false ;
}
2009-02-09 10:57:26 +03:00
qi = & ah - > txq [ q ] ;
2008-10-29 07:46:30 +03:00
if ( qi - > tqi_type = = ATH9K_TX_QUEUE_INACTIVE ) {
2010-12-03 06:12:37 +03:00
ath_dbg ( common , ATH_DBG_QUEUE ,
" Get TXQ properties, inactive queue: %u \n " , q ) ;
2008-10-29 07:46:30 +03:00
return false ;
}
qinfo - > tqi_qflags = qi - > tqi_qflags ;
qinfo - > tqi_ver = qi - > tqi_ver ;
qinfo - > tqi_subtype = qi - > tqi_subtype ;
qinfo - > tqi_qflags = qi - > tqi_qflags ;
qinfo - > tqi_priority = qi - > tqi_priority ;
qinfo - > tqi_aifs = qi - > tqi_aifs ;
qinfo - > tqi_cwmin = qi - > tqi_cwmin ;
qinfo - > tqi_cwmax = qi - > tqi_cwmax ;
qinfo - > tqi_shretry = qi - > tqi_shretry ;
qinfo - > tqi_lgretry = qi - > tqi_lgretry ;
qinfo - > tqi_cbrPeriod = qi - > tqi_cbrPeriod ;
qinfo - > tqi_cbrOverflowLimit = qi - > tqi_cbrOverflowLimit ;
qinfo - > tqi_burstTime = qi - > tqi_burstTime ;
qinfo - > tqi_readyTime = qi - > tqi_readyTime ;
return true ;
}
2009-09-24 07:07:00 +04:00
EXPORT_SYMBOL ( ath9k_hw_get_txq_props ) ;
2008-10-29 07:46:30 +03:00
2009-02-09 10:57:12 +03:00
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
                          const struct ath9k_tx_queue_info *qinfo)
{
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        int q;

        switch (type) {
        case ATH9K_TX_QUEUE_BEACON:
                q = pCap->total_queues - 1;
                break;
        case ATH9K_TX_QUEUE_CAB:
                q = pCap->total_queues - 2;
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                q = 1;
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                q = pCap->total_queues - 3;
                break;
        case ATH9K_TX_QUEUE_DATA:
                for (q = 0; q < pCap->total_queues; q++)
                        if (ah->txq[q].tqi_type ==
                            ATH9K_TX_QUEUE_INACTIVE)
                                break;
                if (q == pCap->total_queues) {
                        ath_err(common, "No available TX queue\n");
                        return -1;
                }
                break;
        default:
                ath_err(common, "Invalid TX queue type: %u\n", type);
                return -1;
        }

        ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

        qi = &ah->txq[q];
        if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
                ath_err(common, "TX queue: %u already active\n", q);
                return -1;
        }

        memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
        qi->tqi_type = type;
        if (qinfo == NULL) {
                qi->tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE
                        | TXQ_FLAG_TXERRINT_ENABLE
                        | TXQ_FLAG_TXDESCINT_ENABLE
                        | TXQ_FLAG_TXURNINT_ENABLE;
                qi->tqi_aifs = INIT_AIFS;
                qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
                qi->tqi_cwmax = INIT_CWMAX;
                qi->tqi_shretry = INIT_SH_RETRY;
                qi->tqi_lgretry = INIT_LG_RETRY;
                qi->tqi_physCompBuf = 0;
        } else {
                qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
                (void) ath9k_hw_set_txq_props(ah, q, qinfo);
        }

        return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
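
/*
 * Example (illustrative sketch): allocating a data queue with explicit
 * parameters instead of the NULL-qinfo defaults. The values below are made
 * up for illustration; real callers derive them from their WMM/AC setup:
 *
 *      struct ath9k_tx_queue_info qi;
 *      int qnum;
 *
 *      memset(&qi, 0, sizeof(qi));
 *      qi.tqi_subtype = ATH9K_WME_AC_BE;
 *      qi.tqi_aifs = 2;
 *      qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
 *      qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
 *      qnum = ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, &qi);
 *      if (qnum == -1)
 *              return -ENOSPC;
 */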
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;

        if (q >= pCap->total_queues) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Release TXQ, invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Release TXQ, inactive queue: %u\n", q);
                return false;
        }

        ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

        qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
        ah->txok_interrupt_mask &= ~(1 << q);
        ah->txerr_interrupt_mask &= ~(1 << q);
        ah->txdesc_interrupt_mask &= ~(1 << q);
        ah->txeol_interrupt_mask &= ~(1 << q);
        ah->txurn_interrupt_mask &= ~(1 << q);
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_channel *chan = ah->curchan;
        struct ath9k_tx_queue_info *qi;
        u32 cwMin, chanCwMin, value;

        if (q >= pCap->total_queues) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Reset TXQ, invalid queue: %u\n", q);
                return false;
        }

        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
                        "Reset TXQ, inactive queue: %u\n", q);
                return true;
        }

        ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

        if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
                if (chan && IS_CHAN_B(chan))
                        chanCwMin = INIT_CWMIN_11B;
                else
                        chanCwMin = INIT_CWMIN;

                for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
        } else
                cwMin = qi->tqi_cwmin;

        ENABLE_REGWRITE_BUFFER(ah);

        REG_WRITE(ah, AR_DLCL_IFS(q),
                  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
                  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
                  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

        REG_WRITE(ah, AR_DRETRY_LIMIT(q),
                  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
                  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
                  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

        REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
        REG_WRITE(ah, AR_DMISC(q),
                  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

        if (qi->tqi_cbrPeriod) {
                REG_WRITE(ah, AR_QCBRCFG(q),
                          SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
                          SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
                REG_WRITE(ah, AR_QMISC(q),
                          REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
                          (qi->tqi_cbrOverflowLimit ?
                           AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
        }
        if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
                          AR_Q_RDYTIMECFG_EN);
        }

        REG_WRITE(ah, AR_DCHNTIME(q),
                  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
                  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

        if (qi->tqi_burstTime
            && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
                REG_WRITE(ah, AR_QMISC(q),
                          REG_READ(ah, AR_QMISC(q)) |
                          AR_Q_MISC_RDYTIME_EXP_POLICY);
        }

        if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
                REG_WRITE(ah, AR_DMISC(q),
                          REG_READ(ah, AR_DMISC(q)) |
                          AR_D_MISC_POST_FR_BKOFF_DIS);
        }

        REGWRITE_BUFFER_FLUSH(ah);

        if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
                REG_WRITE(ah, AR_DMISC(q),
                          REG_READ(ah, AR_DMISC(q)) |
                          AR_D_MISC_FRAG_BKOFF_EN);
        }

        switch (qi->tqi_type) {
        case ATH9K_TX_QUEUE_BEACON:
                ENABLE_REGWRITE_BUFFER(ah);

                REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
                          | AR_Q_MISC_FSP_DBA_GATED
                          | AR_Q_MISC_BEACON_USE
                          | AR_Q_MISC_CBR_INCR_DIS1);

                REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
                          | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
                          | AR_D_MISC_BEACON_USE
                          | AR_D_MISC_POST_FR_BKOFF_DIS);

                REGWRITE_BUFFER_FLUSH(ah);

                /*
                 * cwmin and cwmax should be 0 for beacon queue
                 * but not for IBSS as we would create an imbalance
                 * on beaconing fairness for participating nodes.
                 */
                if (AR_SREV_9300_20_OR_LATER(ah) &&
                    ah->opmode != NL80211_IFTYPE_ADHOC) {
                        REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
                                  | SM(0, AR_D_LCL_IFS_CWMAX)
                                  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
                }
                break;
        case ATH9K_TX_QUEUE_CAB:
                ENABLE_REGWRITE_BUFFER(ah);

                REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
                          | AR_Q_MISC_FSP_DBA_GATED
                          | AR_Q_MISC_CBR_INCR_DIS1
                          | AR_Q_MISC_CBR_INCR_DIS0);
                value = (qi->tqi_readyTime -
                         (ah->config.sw_beacon_response_time -
                          ah->config.dma_beacon_response_time) -
                         ah->config.additional_swba_backoff) * 1024;
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          value | AR_Q_RDYTIMECFG_EN);
                REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
                          | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

                REGWRITE_BUFFER_FLUSH(ah);
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                REG_WRITE(ah, AR_QMISC(q),
                          REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
                break;
        case ATH9K_TX_QUEUE_UAPSD:
                REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
                          AR_D_MISC_POST_FR_BKOFF_DIS);
                break;
        default:
                break;
        }

        if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
                REG_WRITE(ah, AR_DMISC(q),
                          REG_READ(ah, AR_DMISC(q)) |
                          SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
                             AR_D_MISC_ARB_LOCKOUT_CNTRL) |
                          AR_D_MISC_POST_FR_BKOFF_DIS);
        }

        if (AR_SREV_9300_20_OR_LATER(ah))
                REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

        if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
                ah->txok_interrupt_mask |= 1 << q;
        else
                ah->txok_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
                ah->txerr_interrupt_mask |= 1 << q;
        else
                ah->txerr_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
                ah->txdesc_interrupt_mask |= 1 << q;
        else
                ah->txdesc_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
                ah->txeol_interrupt_mask |= 1 << q;
        else
                ah->txeol_interrupt_mask &= ~(1 << q);
        if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
                ah->txurn_interrupt_mask |= 1 << q;
        else
                ah->txurn_interrupt_mask &= ~(1 << q);
        ath9k_hw_set_txq_interrupts(ah, qi);

        return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                        struct ath_rx_status *rs, u64 tsf)
{
        struct ar5416_desc ads;
        struct ar5416_desc *adsp = AR5416DESC(ds);
        u32 phyerr;

        if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
                return -EINPROGRESS;

        ads.u.rx = adsp->u.rx;

        rs->rs_status = 0;
        rs->rs_flags = 0;

        rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
        rs->rs_tstamp = ads.AR_RcvTimestamp;

        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
                rs->rs_rssi = ATH9K_RSSI_BAD;
                rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
                rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
        } else {
                rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
                rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
                rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
                rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
                rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
                rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
                rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
        }
        if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
                rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
        else
                rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

        rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
        rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

        rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
        rs->rs_moreaggr =
                (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
        rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
        rs->rs_flags =
                (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
        rs->rs_flags |=
                (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

        if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
                rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
        if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
                rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
        if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
                rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

        if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
                if (ads.ds_rxstatus8 & AR_CRCErr)
                        rs->rs_status |= ATH9K_RXERR_CRC;
                if (ads.ds_rxstatus8 & AR_PHYErr) {
                        rs->rs_status |= ATH9K_RXERR_PHY;
                        phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
                        rs->rs_phyerr = phyerr;
                }
                if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
                if (ads.ds_rxstatus8 & AR_MichaelErr)
                        rs->rs_status |= ATH9K_RXERR_MIC;
                if (ads.ds_rxstatus8 & AR_KeyMiss)
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
        }

        return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true, this will kill any frame currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort ( struct ath_hw * ah , bool set )
2008-10-29 07:46:30 +03:00
{
u32 reg ;
if ( set ) {
REG_SET_BIT ( ah , AR_DIAG_SW ,
( AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT ) ) ;
2009-02-16 10:53:20 +03:00
if ( ! ath9k_hw_wait ( ah , AR_OBS_BUS_1 , AR_OBS_BUS_1_RX_STATE ,
0 , AH_WAIT_TIMEOUT ) ) {
2008-10-29 07:46:30 +03:00
REG_CLR_BIT ( ah , AR_DIAG_SW ,
( AR_DIAG_RX_DIS |
AR_DIAG_RX_ABORT ) ) ;
reg = REG_READ ( ah , AR_OBS_BUS_1 ) ;
2010-12-03 06:12:36 +03:00
ath_err ( ath9k_hw_common ( ah ) ,
" RX failed to go idle in 10 ms RXSM=0x%x \n " ,
reg ) ;
2008-10-29 07:46:30 +03:00
return false ;
}
} else {
REG_CLR_BIT ( ah , AR_DIAG_SW ,
( AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT ) ) ;
}
return true ;
}
2009-09-24 07:07:00 +04:00
EXPORT_SYMBOL ( ath9k_hw_setrxabort ) ;
2008-10-29 07:46:30 +03:00
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
        REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
        ath9k_enable_mib_counters(ah);

        ath9k_ani_reset(ah, is_scanning);

        REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
        REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

        ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
#define AH_RX_TIME_QUANTUM     100     /* usec */
        struct ath_common *common = ath9k_hw_common(ah);
        int i;

        REG_WRITE(ah, AR_CR, AR_CR_RXD);

        /* Wait for rx enable bit to go low */
        for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
                if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
                        break;
                udelay(AH_TIME_QUANTUM);
        }

        if (i == 0) {
                ath_err(common,
                        "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
                        AH_RX_STOP_DMA_TIMEOUT / 1000,
                        REG_READ(ah, AR_CR),
                        REG_READ(ah, AR_DIAG_SW));
                return false;
        } else {
                return true;
        }

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
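
/*
 * Example (illustrative sketch): a typical RX teardown aborts the PCU first
 * so no new frames are started, and only then stops the RX DMA engine:
 *
 *      ath9k_hw_abortpcurecv(ah);
 *      ath9k_hw_setrxfilter(ah, 0);
 *      stopped = ath9k_hw_stopdmarecv(ah);
 *
 * ath9k_hw_setrxfilter() is defined elsewhere in the HW code and is shown
 * only to indicate where clearing the RX filter usually fits in.
 */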
int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
        struct ath9k_tx_queue_info qi;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_aifs = 1;
        qi.tqi_cwmin = 0;
        qi.tqi_cwmax = 0;
        /* NB: don't enable any interrupts */
        return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
bool ath9k_hw_intrpend(struct ath_hw *ah)
{
        u32 host_isr;

        if (AR_SREV_9100(ah))
                return true;

        host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
        if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
                return true;

        host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
        if ((host_isr & AR_INTR_SYNC_DEFAULT)
            && (host_isr != AR_INTR_SPURIOUS))
                return true;

        return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);
void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);

        ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
        REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
        (void) REG_READ(ah, AR_IER);

        if (!AR_SREV_9100(ah)) {
                REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
                (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

                REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
                (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
        }
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
        struct ath_common *common = ath9k_hw_common(ah);

        if (!(ah->imask & ATH9K_INT_GLOBAL))
                return;

        ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
        REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
        if (!AR_SREV_9100(ah)) {
                REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
                          AR_INTR_MAC_IRQ);
                REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

                REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
                          AR_INTR_SYNC_DEFAULT);
                REG_WRITE(ah, AR_INTR_SYNC_MASK,
                          AR_INTR_SYNC_DEFAULT);
        }
        ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
                REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
{
        enum ath9k_int omask = ah->imask;
        u32 mask, mask2;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);

        if (!(ints & ATH9K_INT_GLOBAL))
                ath9k_hw_enable_interrupts(ah);

        ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);

        /* TODO: global int Ref count */
        mask = ints & ATH9K_INT_COMMON;
        mask2 = 0;

        if (ints & ATH9K_INT_TX) {
                if (ah->config.tx_intr_mitigation)
                        mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
                else {
                        if (ah->txok_interrupt_mask)
                                mask |= AR_IMR_TXOK;
                        if (ah->txdesc_interrupt_mask)
                                mask |= AR_IMR_TXDESC;
                }
                if (ah->txerr_interrupt_mask)
                        mask |= AR_IMR_TXERR;
                if (ah->txeol_interrupt_mask)
                        mask |= AR_IMR_TXEOL;
        }
        if (ints & ATH9K_INT_RX) {
                if (AR_SREV_9300_20_OR_LATER(ah)) {
                        mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
                        if (ah->config.rx_intr_mitigation) {
                                mask &= ~AR_IMR_RXOK_LP;
                                mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
                        } else {
                                mask |= AR_IMR_RXOK_LP;
                        }
                } else {
                        if (ah->config.rx_intr_mitigation)
                                mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
                        else
                                mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
                }
                if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
                        mask |= AR_IMR_GENTMR;
        }

        if (ints & (ATH9K_INT_BMISC)) {
                mask |= AR_IMR_BCNMISC;
                if (ints & ATH9K_INT_TIM)
                        mask2 |= AR_IMR_S2_TIM;
                if (ints & ATH9K_INT_DTIM)
                        mask2 |= AR_IMR_S2_DTIM;
                if (ints & ATH9K_INT_DTIMSYNC)
                        mask2 |= AR_IMR_S2_DTIMSYNC;
                if (ints & ATH9K_INT_CABEND)
                        mask2 |= AR_IMR_S2_CABEND;
                if (ints & ATH9K_INT_TSFOOR)
                        mask2 |= AR_IMR_S2_TSFOOR;
        }

        if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
                mask |= AR_IMR_BCNMISC;
                if (ints & ATH9K_INT_GTT)
                        mask2 |= AR_IMR_S2_GTT;
                if (ints & ATH9K_INT_CST)
                        mask2 |= AR_IMR_S2_CST;
        }

        ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
        REG_WRITE(ah, AR_IMR, mask);
        ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
                           AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
                           AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
        ah->imrs2_reg |= mask2;
        REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

        if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
                if (ints & ATH9K_INT_TIM_TIMER)
                        REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
                else
                        REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
        }

        ath9k_hw_enable_interrupts(ah);

        return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
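
/*
 * Example (illustrative sketch): drivers normally build ah->imask once at
 * start-up and hand it to ath9k_hw_set_interrupts(); the exact set of bits
 * below is only indicative:
 *
 *      ah->imask = ATH9K_INT_TX | ATH9K_INT_RX |
 *                  ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
 *                  ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
 *      ath9k_hw_set_interrupts(ah, ah->imask);
 *
 * Delivery can then be gated temporarily with ath9k_hw_disable_interrupts()
 * and ath9k_hw_enable_interrupts(), as ath9k_hw_updatetxtriglevel() does
 * above.
 */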