/*
	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;
	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;
	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;
	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}
	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;
	/*
	 * Make sure the frame has the requested number of bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);
	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
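
/*
 * Illustrative sketch of the buffer layout produced above:
 *
 *   | head_size | desc_size + data_size | tail_size |
 *     headroom    ^ skb->data             tailroom
 *
 * The headroom absorbs the 4-byte alignment shift and IV/EIV
 * insertion, while the tailroom leaves room for the ICV.
 */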
void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	/*
	 * If the device has requested headroom, we should make sure that
	 * it is also mapped to the DMA so it can be used for transferring
	 * additional descriptor information to the hardware.
	 */
	skb_push(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);

	/*
	 * Restore data pointer to original location again.
	 */
	skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		/*
		 * Add headroom to the skb length; it has been removed
		 * by the driver but was actually mapped to DMA.
		 */
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
				 skb->len + rt2x00dev->hw->extra_tx_headroom,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
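
/*
 * Note on the two helpers above: ALIGN_SIZE(skb, offset), as defined in
 * rt2x00.h, evaluates to the number of bytes that skb->data + offset
 * sits past a 4-byte boundary. Since the allocation reserved headroom,
 * pushing 'align' bytes and moving the frame down realigns the data in
 * place without reallocating. Illustrative example: with skb->data at
 * an address ending in ...0x02, align is 2 and the frame is shifted
 * 2 bytes towards the start of the buffer.
 */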
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	unsigned int frame_length = skb->len;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = (4 - (payload_align - header_align)) & 3;

	if (header_align == payload_align) {
		/*
		 * Both header and payload must be moved the same
		 * amount of bytes to align them properly. This means
		 * we don't use the L2 padding but just move the entire
		 * frame.
		 */
		rt2x00queue_align_frame(skb);
	} else if (!payload_align) {
		/*
		 * Simple L2 padding, only the header needs to be moved,
		 * the payload is already properly aligned.
		 */
		skb_push(skb, header_align);
		memmove(skb->data, skb->data + header_align, frame_length);
		skbdesc->flags |= SKBDESC_L2_PADDED;
	} else {
		/*
		 * Complicated L2 padding, both header and payload need
		 * to be moved. By default we only move to the start
		 * of the buffer, so our header alignment needs to be
		 * increased if there is not enough room for the header
		 * to be moved.
		 */
		if (payload_align > header_align)
			header_align += 4;

		skb_push(skb, header_align);
		memmove(skb->data, skb->data + header_align, header_length);
		/*
		 * The payload still sits header_align bytes up from its
		 * final location; move it down behind the L2 padding.
		 */
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + header_align,
			frame_length - header_length);
		skbdesc->flags |= SKBDESC_L2_PADDED;
	}
}
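
/*
 * Worked example for the "complicated" branch above, with illustrative
 * numbers: a 26-byte header on a 4-byte aligned buffer gives
 * header_align = 0, payload_align = 2 and l2pad = (4 - (2 - 0)) & 3 = 2.
 * Because payload_align > header_align the frame is pushed out by
 * header_align + 4 = 4 bytes: the header moves 4 bytes towards the
 * buffer start and stays aligned, and the payload lands at offset
 * 26 + 2 = 28, so both parts end up on a 4-byte boundary with 2 bytes
 * of L2 padding in between.
 */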
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	unsigned int l2pad = (4 - (header_length & 3)) & 3;

	if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED))
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}
static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
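	/*
	 * The sequence number occupies bits 4-15 of the seq_ctrl field,
	 * hence the steps of 0x10; bits 0-3 hold the fragment number,
	 * which the mask below preserves.
	 */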
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}
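
/*
 * Illustrative CCK example for the PLCP length calculation above,
 * assuming GET_DURATION/GET_DURATION_RES compute the frame airtime in
 * microseconds as (len * 8 * 10) / bitrate and its remainder: a
 * 1500-byte frame at 11 Mbps (bitrate == 110) gives 1090 us with a
 * residual of 100, so the duration is rounded up to 1091 us and, since
 * the residual exceeds 30, the length-extension bit stays cleared.
 */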
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and alignment information.
	 */
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
	txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);
	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;
	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 * 1) Don't kick the beacon queue.
	 * 2) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 3) Rule 2 can be broken when the available entries
	 *    in the queue are less than a certain threshold.
	 */
	if (entry->queue->qid == QID_BEACON)
		return;

	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}
	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;
	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}
	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);
/*
* It could be possible that the queue was corrupted and this
2008-11-12 00:01:37 +01:00
* call failed . Since we always return NETDEV_TX_OK to mac80211 ,
* this frame will simply be dropped .
2008-08-04 16:37:44 +02:00
*/
2008-06-06 22:50:28 +02:00
if ( unlikely ( queue - > rt2x00dev - > ops - > lib - > write_tx_data ( entry ) ) ) {
2008-08-29 21:04:26 +02:00
clear_bit ( ENTRY_OWNER_DEVICE_DATA , & entry - > flags ) ;
2008-08-04 16:37:44 +02:00
entry - > skb = NULL ;
2008-11-12 00:01:37 +01:00
return - EIO ;
2008-06-06 22:50:28 +02:00
}
2008-06-16 19:56:54 +02:00
if ( test_bit ( DRIVER_REQUIRE_DMA , & queue - > rt2x00dev - > flags ) )
rt2x00queue_map_txskb ( queue - > rt2x00dev , skb ) ;
2008-08-29 21:04:26 +02:00
set_bit ( ENTRY_DATA_PENDING , & entry - > flags ) ;
2008-06-06 22:50:28 +02:00
rt2x00queue_index_inc ( queue , Q_INDEX ) ;
rt2x00queue_write_tx_descriptor ( entry , & txdesc ) ;
return 0 ;
}
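
/*
 * Note on error handling: callers such as rt2x00mac_tx() are expected
 * to drop the frame and still report NETDEV_TX_OK to mac80211 when this
 * function returns an error, as described in the comment above the
 * write_tx_data() call.
 */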
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;
	__le32 desc[16];

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
	intf->beacon->skb = NULL;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * For the descriptor we use a local array from where the
	 * driver can move it to the correct location required for
	 * the hardware.
	 */
	memset(desc, 0, sizeof(desc));

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->desc = desc;
	skbdesc->desc_len = intf->beacon->queue->desc_size;
	skbdesc->entry = intf->beacon;

	/*
	 * Write TX descriptor into reserved room in front of the beacon.
	 */
	rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Send beacon to hardware.
	 * Also enable beacon generation, which might have been disabled
	 * by the driver during the config_beacon() callback function.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon);
	rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}
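
/*
 * Index bookkeeping note: Q_INDEX points at the next entry to be used
 * by the driver and Q_INDEX_DONE at the next entry awaiting completion,
 * so queue->length above tracks the number of entries currently in
 * flight between the two, while queue->count accumulates the total
 * number of completed frames.
 */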
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}
void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			queue->entries[i].flags = 0;

			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
		}
	}
}
static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET
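	/*
	 * Sketch of the allocation addressed by the macro above: the
	 * fixed-size queue_entry structures are laid out first, followed
	 * by one driver-private blob of qdesc->priv_size bytes per entry:
	 *
	 *   | entry 0 ... entry limit-1 | priv 0 ... priv limit-1 |
	 */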
	queue->entries = entries;

	return 0;
}
static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}
static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}
void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}