/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include <linux/export.h>

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Caution must be taken to set the frame trigger level based
 * on the DMA request size. For example, if the DMA request size is set to
 * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because
 * there needs to be enough space in the TX FIFO for the requested transfer
 * size. Hence the TX FIFO will stop with 512 - 128 = 384 bytes. If we set
 * the threshold to a value beyond 6, then the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead, so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);

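/*
 * Usage sketch (illustrative, not part of the original file): callers
 * typically raise the trigger level in response to a reported TX underrun
 * and rely on the return value to stop adjusting once
 * ah->config.max_txtrig_level is reached. ATH9K_INT_TXURN is the interrupt
 * flag used elsewhere in ath9k for this condition; the surrounding ISR
 * context (isr_status) is assumed here.
 *
 *	if (isr_status & ATH9K_INT_TXURN)
 *		ath9k_hw_updatetxtriglevel(ah, true);
 */
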
void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int maxdelay = 1000;
	int i, q;

	if (ah->curchan) {
		if (IS_CHAN_HALF_RATE(ah->curchan))
			maxdelay *= 2;
		else if (IS_CHAN_QUARTER_RATE(ah->curchan))
			maxdelay *= 4;
	}

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < maxdelay; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	1000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
	int wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (wait != wait_time)
			udelay(ATH9K_TIME_QUANTUM);

		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
	}

	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = ATH9K_NUM_TX_QUEUES - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = ATH9K_NUM_TX_QUEUES - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = ATH9K_NUM_TX_QUEUES - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == ATH9K_NUM_TX_QUEUES) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
	(void) ath9k_hw_set_txq_props(ah, q, qinfo);

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);

static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
{
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
}

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ath9k_hw_clear_queue_interrupts(ah, q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_BEACON_USE
			    | AR_Q_MISC_CBR_INCR_DIS1);

		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			    | AR_D_MISC_BEACON_USE
			    | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for beacon queue
		 * but not for IBSS as we would create an imbalance
		 * on beaconing fairness for participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_CBR_INCR_DIS1
			    | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time)) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	ath9k_hw_clear_queue_interrupts(ah, q);
	if (qi->tqi_qflags & TXQ_FLAG_TXINT_ENABLE) {
		ah->txok_interrupt_mask |= 1 << q;
		ah->txerr_interrupt_mask |= 1 << q;
	}
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;
	rs->flag = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl[0] = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl[1] = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl[2] = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext[0] = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext[1] = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext[2] = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl[0] = MS(ads.ds_rxstatus0,
					AR_RxRSSIAnt00);
		rs->rs_rssi_ctl[1] = MS(ads.ds_rxstatus0,
					AR_RxRSSIAnt01);
		rs->rs_rssi_ctl[2] = MS(ads.ds_rxstatus0,
					AR_RxRSSIAnt02);
		rs->rs_rssi_ext[0] = MS(ads.ds_rxstatus4,
					AR_RxRSSIAnt10);
		rs->rs_rssi_ext[1] = MS(ads.ds_rxstatus4,
					AR_RxRSSIAnt11);
		rs->rs_rssi_ext[2] = MS(ads.ds_rxstatus4,
					AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0;
	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);

	/* directly mapped flags for ieee80211_rx_status */
	rs->flag |=
		(ads.ds_rxstatus3 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
	rs->flag |=
		(ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0;
	if (AR_SREV_9280_20_OR_LATER(ah))
		rs->flag |=
			(ads.ds_rxstatus3 & AR_STBC) ?
				/* we can only Nss=1 STBC */
				(1 << RX_FLAG_STBC_SHIFT) : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way
		 */
		if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
	} else {
		if (ads.ds_rxstatus8 &
		    (AR_CRCErr | AR_PHYErr | AR_DecryptCRCErr | AR_MichaelErr))
			rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;

		/* Only up to MCS16 supported, everything above is invalid */
		if (rs->rs_rate >= 0x90)
			rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;
	}

	if (ads.ds_rxstatus8 & AR_KeyMiss)
		rs->rs_status |= ATH9K_RXERR_KEYMISS;

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

/*
 * This can stop or re-enable RX.
 *
 * If set is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

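/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * that needs the RX state machine idle before touching receive-related
 * state could bracket the work with this helper. reconfigure_rx() is a
 * hypothetical placeholder; a false return from the first call means RX did
 * not go idle and the caller would normally fall back to a chip reset.
 *
 *	if (ath9k_hw_setrxabort(ah, true)) {
 *		reconfigure_rx(ah);
 *		ath9k_hw_setrxabort(ah, false);
 *	}
 */
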
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	u32 mac_status, last_mac_status = 0;
	int i;

	/* Enable access to the DMA observation bus */
	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;

		if (!AR_SREV_9300_20_OR_LATER(ah)) {
			mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
			if (mac_status == 0x1c0 && mac_status == last_mac_status) {
				*reset = true;
				break;
			}

			last_mac_status = mac_status;
		}

		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW),
			REG_READ(ah, AR_DMADBG_7));
		return false;
	} else {
		return true;
	}

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;

	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);

	if (((host_isr & AR_INTR_MAC_IRQ) ||
	     (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
	    (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

void ath9k_hw_kill_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	ath_dbg(common, INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_kill_interrupts);

void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	if (!(ah->imask & ATH9K_INT_GLOBAL))
		atomic_set(&ah->intr_ref_cnt, -1);
	else
		atomic_dec(&ah->intr_ref_cnt);

	ath9k_hw_kill_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_default = AR_INTR_SYNC_DEFAULT;
	u32 async_mask;

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
		ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
			atomic_read(&ah->intr_ref_cnt));
		return;
	}

	if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	async_mask = AR_INTR_MAC_IRQ;

	if (ah->imask & ATH9K_INT_MCI)
		async_mask |= AR_INTR_ASYNC_MASK_MCI;

	ath_dbg(common, INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);

void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
	enum ath9k_int ints = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_disable_interrupts(ah);

	ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);

	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & ATH9K_INT_GENTIMER)
		mask |= AR_IMR_GENTMR;

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);