// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/mac80211/util.c
 */

#include "ieee802154_i.h"
#include "driver-ops.h"

/* privid for wpan_phys to determine whether they belong to us or not */
const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;

/**
 * ieee802154_wake_queue - wake ieee802154 queue
 * @hw: main hardware object
 *
 * Transceivers usually have either one transmit framebuffer or one framebuffer
 * for both transmitting and receiving. Hence, the core currently only handles
 * one frame at a time for each phy, which means we had to stop the queue to
 * avoid new skbs from arriving during the transmission. The queue then needs
 * to be woken up after the operation.
 */
static void ieee802154_wake_queue(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	clear_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!sdata->dev)
			continue;

		netif_wake_queue(sdata->dev);
	}
	rcu_read_unlock();
}

/**
 * ieee802154_stop_queue - stop ieee802154 queue
 * @hw: main hardware object
 *
 * Transceivers usually have either one transmit framebuffer or one framebuffer
 * for both transmitting and receiving. Hence, the core currently only handles
 * one frame at a time for each phy, which means we need to tell upper layers to
 * stop giving us new skbs while we are busy with the transmitted one. The queue
 * must then be stopped before transmitting.
 */
static void ieee802154_stop_queue(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!sdata->dev)
			continue;

		netif_stop_queue(sdata->dev);
	}
	rcu_read_unlock();
}
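
/**
 * ieee802154_hold_queue - hold ieee802154 queue
 * @local: main mac object
 *
 * Hold the queue by incrementing an atomic counter and stop it on the first
 * hold. The queue cannot be woken up again while the counter is not 0.
 */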
void ieee802154_hold_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	spin_lock_irqsave(&local->phy->queue_lock, flags);
	if (!atomic_fetch_inc(&local->phy->hold_txs))
		ieee802154_stop_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
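
/**
 * ieee802154_release_queue - release ieee802154 queue
 * @local: main mac object
 *
 * Release a held queue by decrementing the atomic counter and wake it up only
 * when the counter reaches 0.
 */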
void ieee802154_release_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	spin_lock_irqsave(&local->phy->queue_lock, flags);
	if (atomic_dec_and_test(&local->phy->hold_txs))
		ieee802154_wake_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}
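
/**
 * ieee802154_disable_queue - disable ieee802154 queue
 * @local: main mac object
 *
 * Disable the transmit queues of all interfaces. Unlike
 * ieee802154_stop_queue(), netif_tx_disable() synchronizes against any
 * ongoing ndo_start_xmit() call, so no new skb can be handed to us once this
 * function returns.
 */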
void ieee802154_disable_queue(struct ieee802154_local *local)
{
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!sdata->dev)
			continue;

		netif_tx_disable(sdata->dev);
	}
	rcu_read_unlock();
}
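
/**
 * ieee802154_xmit_ifs_timer - interframe spacing timer expired
 * @timer: the IFS hrtimer armed by ieee802154_xmit_complete()
 *
 * Releases the queue once the SIFS/LIFS period after a transmission has
 * elapsed, so the next frame may be sent.
 */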
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
{
	struct ieee802154_local *local =
		container_of(timer, struct ieee802154_local, ifs_timer);

	ieee802154_release_queue(local);

	return HRTIMER_NORESTART;
}
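
/**
 * ieee802154_xmit_complete - frame transmission completed
 * @hw: main hardware object
 * @skb: the skb which has been transmitted
 * @ifs_handling: let the core enforce the interframe spacing
 *
 * Called by drivers once a frame has successfully left the transceiver.
 * Records the result, frees the skb and either releases the queue right away
 * or, if @ifs_handling is true, arms the IFS timer which releases it after
 * the SIFS/LIFS period. Finally wakes up anyone waiting for all ongoing
 * transmissions to finish.
 */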
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
			      bool ifs_handling)
{
	struct ieee802154_local *local = hw_to_local(hw);

	local->tx_result = IEEE802154_SUCCESS;

	if (ifs_handling) {
		u8 max_sifs_size;

		/* If the transceiver sets the CRC on its own, the FCS is not
		 * part of skb->len, so the LIFS threshold is 16 instead of 18.
		 */
		if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
			max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE -
					IEEE802154_FCS_LEN;
		else
			max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;

		if (skb->len > max_sifs_size)
			hrtimer_start(&local->ifs_timer,
				      hw->phy->lifs_period * NSEC_PER_USEC,
				      HRTIMER_MODE_REL);
		else
			hrtimer_start(&local->ifs_timer,
				      hw->phy->sifs_period * NSEC_PER_USEC,
				      HRTIMER_MODE_REL);
	} else {
		ieee802154_release_queue(local);
	}

	dev_consume_skb_any(skb);
	if (atomic_dec_and_test(&hw->phy->ongoing_txs))
		wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
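
/*
 * Illustrative sketch (not taken from any real driver): a driver typically
 * calls ieee802154_xmit_complete() from its TX-done path, e.g. a completion
 * callback or threaded IRQ handler, once the frame has left the transceiver.
 * The names below (foo_tx_done, struct foo_priv, priv->tx_skb) are
 * hypothetical; passing true for ifs_handling lets the core enforce the
 * SIFS/LIFS gap when the hardware does not:
 *
 *	static void foo_tx_done(struct foo_priv *priv)
 *	{
 *		ieee802154_xmit_complete(priv->hw, priv->tx_skb, true);
 *	}
 */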
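
/**
 * ieee802154_xmit_error - frame transmission failed
 * @hw: main hardware object
 * @skb: the skb which could not be transmitted
 * @reason: error code describing why the transmission failed
 *
 * Called by drivers when a frame could not be transmitted. Records the error,
 * releases the queue, frees the skb and wakes up anyone waiting for the
 * ongoing transmissions to finish.
 */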
void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
			   int reason)
{
	struct ieee802154_local *local = hw_to_local(hw);

	local->tx_result = reason;

	ieee802154_release_queue(local);
	dev_kfree_skb_any(skb);
	if (atomic_dec_and_test(&hw->phy->ongoing_txs))
		wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_error);
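
/**
 * ieee802154_xmit_hw_error - offloading the frame to the hardware failed
 * @hw: main hardware object
 * @skb: the skb which could not be transmitted
 *
 * Convenience wrapper around ieee802154_xmit_error() for drivers hitting an
 * unspecified hardware or driver error, reported as IEEE802154_SYSTEM_ERROR.
 */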
void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	ieee802154_xmit_error(hw, skb, IEEE802154_SYSTEM_ERROR);
}
EXPORT_SYMBOL(ieee802154_xmit_hw_error);
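
/**
 * ieee802154_stop_device - stop the hardware
 * @local: main mac object
 *
 * Flush the pending work, cancel the IFS timer and ask the driver to stop the
 * transceiver.
 */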
void ieee802154_stop_device(struct ieee802154_local *local)
{
	flush_workqueue(local->workqueue);
	hrtimer_cancel(&local->ifs_timer);
	drv_stop(local);
}