// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007-2012 Siemens AG
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
#include <asm/unaligned.h>

#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "driver-ops.h"
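
/* Synchronous TX fallback: this work item is queued on the local workqueue by
 * ieee802154_tx() for drivers that only implement a blocking xmit_sync()
 * callback. On success the frame is accounted in the netdev stats and
 * completed through ieee802154_xmit_complete(); on error the netif queues are
 * released, waiters on sync_txq are woken once no transmission remains
 * ongoing, and the skb is dropped.
 */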
void ieee802154_xmit_sync_worker(struct work_struct *work)
{
	struct ieee802154_local *local =
		container_of(work, struct ieee802154_local, sync_tx_work);
	struct sk_buff *skb = local->tx_skb;
	struct net_device *dev = skb->dev;
	int res;

	res = drv_xmit_sync(local, skb);
	if (res)
		goto err_tx;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	ieee802154_xmit_complete(&local->hw, skb, false);

	return;

err_tx:
	/* Restart the netif queue on each sub_if_data object. */
	ieee802154_release_queue(local);
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
	kfree_skb(skb);
	netdev_dbg(dev, "transmission failed\n");
}
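
/* Common TX path for data and MLME frames: append the FCS in software unless
 * the transceiver adds it itself (IEEE802154_HW_TX_OMIT_CKSUM), stop the
 * netif queues, account the frame in ongoing_txs, then hand it to the
 * driver's async callback or fall back to the sync worker above.
 */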
static netdev_tx_t
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
		struct sk_buff *nskb;
		u16 crc;

		if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
					       GFP_ATOMIC);
			if (likely(nskb)) {
				consume_skb(skb);
				skb = nskb;
			} else {
				goto err_free_skb;
			}
		}

		crc = crc_ccitt(0, skb->data, skb->len);
		put_unaligned_le16(crc, skb_put(skb, 2));
	}

	/* Stop the netif queue on each sub_if_data object. */
	ieee802154_hold_queue(local);
	atomic_inc(&local->phy->ongoing_txs);

	/* Drivers should preferably implement the async callback. In some rare
	 * cases they only provide a sync callback which we will use as a
	 * fallback.
	 */
	if (local->ops->xmit_async) {
		unsigned int len = skb->len;

		ret = drv_xmit_async(local, skb);
		if (ret)
			goto err_wake_netif_queue;

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	} else {
		local->tx_skb = skb;
		queue_work(local->workqueue, &local->sync_tx_work);
	}

	return NETDEV_TX_OK;

err_wake_netif_queue:
	ieee802154_release_queue(local);
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
err_free_skb:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
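
/* MLME transmissions must not race with ongoing data frames. The helpers
 * below hold and drain the netif queues: ieee802154_sync_queue() waits on
 * sync_txq until ongoing_txs drops to zero and reports the last TX result,
 * while ieee802154_sync_and_hold_queue() additionally flags the queues as
 * stopped; ieee802154_mlme_op_post() releases the hold once the MLME
 * operation is done.
 */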
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ieee802154_disable_queue(local);
	wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
	ret = local->tx_result;
	ieee802154_release_queue(local);

	return ret;
}

int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ret = ieee802154_sync_queue(local);
	set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);

	return ret;
}

int ieee802154_mlme_op_pre(struct ieee802154_local *local)
{
	return ieee802154_sync_and_hold_queue(local);
}

int ieee802154_mlme_tx_locked(struct ieee802154_local *local,
			      struct ieee802154_sub_if_data *sdata,
			      struct sk_buff *skb)
{
	/* Avoid possible calls to ->ndo_stop() when we asynchronously perform
	 * MLME transmissions.
	 */
	ASSERT_RTNL();

	/* Ensure the device was not stopped, otherwise error out */
	if (!local->open_count)
		return -ENETDOWN;

	/* Warn if the ieee802154 core thinks MLME frames can be sent while the
	 * net interface expects this cannot happen.
	 */
	if (WARN_ON_ONCE(!netif_running(sdata->dev)))
		return -ENETDOWN;

	ieee802154_tx(local, skb);

	return ieee802154_sync_queue(local);
}

int ieee802154_mlme_tx(struct ieee802154_local *local,
		       struct ieee802154_sub_if_data *sdata,
		       struct sk_buff *skb)
{
	int ret;

	rtnl_lock();
	ret = ieee802154_mlme_tx_locked(local, sdata, skb);
	rtnl_unlock();

	return ret;
}

void ieee802154_mlme_op_post(struct ieee802154_local *local)
{
	ieee802154_release_queue(local);
}
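
/* Convenience wrappers that bracket a single MLME transmission with the
 * pre/post queue handling above.
 */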
int ieee802154_mlme_tx_one(struct ieee802154_local *local,
			   struct ieee802154_sub_if_data *sdata,
			   struct sk_buff *skb)
{
	int ret;

	ieee802154_mlme_op_pre(local);
	ret = ieee802154_mlme_tx(local, sdata, skb);
	ieee802154_mlme_op_post(local);

	return ret;
}

int ieee802154_mlme_tx_one_locked(struct ieee802154_local *local,
				  struct ieee802154_sub_if_data *sdata,
				  struct sk_buff *skb)
{
	int ret;

	ieee802154_mlme_op_pre(local);
	ret = ieee802154_mlme_tx_locked(local, sdata, skb);
	ieee802154_mlme_op_post(local);

	return ret;
}
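
/* Hot path used by the ndo_start_xmit handlers below: warn if a frame is
 * submitted while the core believes the queues are stopped (i.e. an MLME
 * operation currently holds them), then hand it to the common TX path.
 */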
static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
{
	return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
}

static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	/* Warn if the net interface tries to transmit frames while the
	 * ieee802154 core assumes the queue is stopped.
	 */
	WARN_ON_ONCE(ieee802154_queue_is_stopped(local));

	return ieee802154_tx(local, skb);
}
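
/* ndo_start_xmit handlers: the monitor interface transmits frames as-is,
 * while the WPAN interface first applies link-layer security (llsec)
 * encryption to the outgoing frame.
 */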
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}

netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int rc;

	/* TODO: this encryption step should move into wpan_dev_hard_header()
	 * and dev_hard_header(); as it is, wireshark shows a MAC header with
	 * security fields while the payload is not yet encrypted.
	 */
	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
	if (rc) {
		netdev_warn(dev, "encryption failed: %i\n", rc);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}