/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2002-2004, Instant802 Networks, Inc.
 * Copyright 2005, Devicescape Software, Inc.
 * Copyright (C) 2019, 2022 Intel Corporation
 */

#ifndef IEEE80211_KEY_H
#define IEEE80211_KEY_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/crypto.h>
#include <linux/rcupdate.h>
#include <crypto/arc4.h>
#include <net/mac80211.h>

#define NUM_DEFAULT_KEYS 4
#define NUM_DEFAULT_MGMT_KEYS 2
#define NUM_DEFAULT_BEACON_KEYS 2
#define INVALID_PTK_KEYIDX 2 /* Keyidx always pointing to a NULL key for PTK */

/* Forward declarations: only pointers to these are used in this header. */
struct ieee80211_local;
struct ieee80211_sub_if_data;
struct ieee80211_link_data;
struct sta_info;
/**
* enum ieee80211_internal_key_flags - internal key flags
*
* @ KEY_FLAG_UPLOADED_TO_HARDWARE : Indicates that this key is present
* in the hardware for TX crypto hardware acceleration .
2011-07-12 14:30:59 +04:00
* @ KEY_FLAG_TAINTED : Key is tainted and packets should be dropped .
2008-02-25 18:27:45 +03:00
*/
enum ieee80211_internal_key_flags {
KEY_FLAG_UPLOADED_TO_HARDWARE = BIT ( 0 ) ,
2011-07-12 14:30:59 +04:00
KEY_FLAG_TAINTED = BIT ( 1 ) ,
2008-02-25 18:27:45 +03:00
} ;
2007-08-29 01:01:55 +04:00
/*
 * TKIP phase-1 key state: tracks whether the P1K has been computed
 * and, if so, whether it has also been uploaded to the hardware.
 */
enum ieee80211_internal_tkip_state {
	TKIP_STATE_NOT_INIT,
	TKIP_STATE_PHASE1_DONE,
	TKIP_STATE_PHASE1_HW_UPLOADED,
};
struct tkip_ctx {
mac80211: fix TKIP races, make API easier to use
Our current TKIP code races against itself on TX
since we can process multiple packets at the same
time on different ACs, but they all share the TX
context for TKIP. This can lead to bad IVs etc.
Also, the crypto offload helper code just obtains
the P1K/P2K from the cache, and can update it as
well, but there's no guarantee that packets are
really processed in order.
To fix these issues, first introduce a spinlock
that will protect the IV16/IV32 values in the TX
context. This first step makes sure that we don't
assign the same IV multiple times or get confused
in other ways.
Secondly, change the way the P1K cache works. I
add a field "p1k_iv32" that stores the value of
the IV32 when the P1K was last recomputed, and
if different from the last time, then a new P1K
is recomputed. This can cause the P1K computation
to flip back and forth if packets are processed
out of order. All this also happens under the new
spinlock.
Finally, because there are argument differences,
split up the ieee80211_get_tkip_key() API into
ieee80211_get_tkip_p1k() and ieee80211_get_tkip_p2k()
and give them the correct arguments.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2011-07-08 00:28:01 +04:00
u16 p1k [ 5 ] ; /* p1k cache */
u32 p1k_iv32 ; /* iv32 for which p1k computed */
2009-12-10 01:25:05 +03:00
enum ieee80211_internal_tkip_state state ;
2008-05-15 03:26:19 +04:00
} ;
2016-02-14 14:56:35 +03:00
/* RX TKIP context: base context plus the current replay-check counters. */
struct tkip_ctx_rx {
	struct tkip_ctx ctx;
	u32 iv32;	/* current iv32 */
	u16 iv16;	/* current iv16 */
};
struct ieee80211_key {
2007-08-29 01:01:55 +04:00
struct ieee80211_local * local ;
struct ieee80211_sub_if_data * sdata ;
struct sta_info * sta ;
2008-04-08 19:56:52 +04:00
/* for sdata list */
2007-08-29 01:01:55 +04:00
struct list_head list ;
2010-06-01 12:19:19 +04:00
/* protected by key mutex */
2007-08-29 01:01:55 +04:00
unsigned int flags ;
2007-05-05 22:45:53 +04:00
union {
struct {
mac80211: fix TKIP races, make API easier to use
Our current TKIP code races against itself on TX
since we can process multiple packets at the same
time on different ACs, but they all share the TX
context for TKIP. This can lead to bad IVs etc.
Also, the crypto offload helper code just obtains
the P1K/P2K from the cache, and can update it as
well, but there's no guarantee that packets are
really processed in order.
To fix these issues, first introduce a spinlock
that will protect the IV16/IV32 values in the TX
context. This first step makes sure that we don't
assign the same IV multiple times or get confused
in other ways.
Secondly, change the way the P1K cache works. I
add a field "p1k_iv32" that stores the value of
the IV32 when the P1K was last recomputed, and
if different from the last time, then a new P1K
is recomputed. This can cause the P1K computation
to flip back and forth if packets are processed
out of order. All this also happens under the new
spinlock.
Finally, because there are argument differences,
split up the ieee80211_get_tkip_key() API into
ieee80211_get_tkip_p1k() and ieee80211_get_tkip_p2k()
and give them the correct arguments.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2011-07-08 00:28:01 +04:00
/* protects tx context */
spinlock_t txlock ;
2007-05-05 22:45:53 +04:00
/* last used TSC */
2008-05-15 03:26:19 +04:00
struct tkip_ctx tx ;
2007-05-05 22:45:53 +04:00
/* last received RSC */
2016-02-14 14:56:35 +03:00
struct tkip_ctx_rx rx [ IEEE80211_NUM_TIDS ] ;
2012-12-04 18:17:42 +04:00
/* number of mic failures */
u32 mic_failures ;
2007-05-05 22:45:53 +04:00
} tkip ;
struct {
2010-06-11 21:27:33 +04:00
/*
* Last received packet number . The first
2012-11-15 02:22:21 +04:00
* IEEE80211_NUM_TIDS counters are used with Data
2010-06-11 21:27:33 +04:00
* frames and the last counter is used with Robust
* Management frames .
*/
2013-05-08 15:09:08 +04:00
u8 rx_pn [ IEEE80211_NUM_TIDS + 1 ] [ IEEE80211_CCMP_PN_LEN ] ;
2013-10-10 11:55:20 +04:00
struct crypto_aead * tfm ;
2007-05-05 22:45:53 +04:00
u32 replays ; /* dot11RSNAStatsCCMPReplays */
} ccmp ;
2009-01-08 14:32:01 +03:00
struct {
2013-05-08 15:09:08 +04:00
u8 rx_pn [ IEEE80211_CMAC_PN_LEN ] ;
2017-02-06 13:49:28 +03:00
struct crypto_shash * tfm ;
2009-01-08 14:32:01 +03:00
u32 replays ; /* dot11RSNAStatsCMACReplays */
u32 icverrors ; /* dot11RSNAStatsCMACICVErrors */
} aes_cmac ;
2015-01-24 20:52:09 +03:00
struct {
u8 rx_pn [ IEEE80211_GMAC_PN_LEN ] ;
struct crypto_aead * tfm ;
u32 replays ; /* dot11RSNAStatsCMACReplays */
u32 icverrors ; /* dot11RSNAStatsCMACICVErrors */
} aes_gmac ;
2015-01-24 20:52:06 +03:00
struct {
/* Last received packet number. The first
* IEEE80211_NUM_TIDS counters are used with Data
* frames and the last counter is used with Robust
* Management frames .
*/
u8 rx_pn [ IEEE80211_NUM_TIDS + 1 ] [ IEEE80211_GCMP_PN_LEN ] ;
struct crypto_aead * tfm ;
u32 replays ; /* dot11RSNAStatsGCMPReplays */
} gcmp ;
2013-03-24 16:23:27 +04:00
struct {
/* generic cipher scheme */
2015-04-20 19:21:58 +03:00
u8 rx_pn [ IEEE80211_NUM_TIDS + 1 ] [ IEEE80211_MAX_PN_LEN ] ;
2013-03-24 16:23:27 +04:00
} gen ;
2007-05-05 22:45:53 +04:00
} u ;
2007-05-05 22:46:38 +04:00
# ifdef CONFIG_MAC80211_DEBUGFS
struct {
struct dentry * stalink ;
struct dentry * dir ;
2008-04-09 00:46:36 +04:00
int cnt ;
2007-05-05 22:46:38 +04:00
} debugfs ;
# endif
2021-05-11 21:02:43 +03:00
unsigned int color ;
2007-08-29 01:01:54 +04:00
/*
* key config , must be last because it contains key
* material as variable length member
*/
struct ieee80211_key_conf conf ;
2007-05-05 22:45:53 +04:00
} ;
2013-03-24 16:23:27 +04:00
struct ieee80211_key *
ieee80211_key_alloc ( u32 cipher , int idx , size_t key_len ,
const u8 * key_data ,
2022-02-09 15:14:26 +03:00
size_t seq_len , const u8 * seq ) ;
2008-02-25 18:27:45 +03:00
/*
* Insert a key into data structures ( sdata , sta if necessary )
2013-03-07 01:53:52 +04:00
* to make it used , free old key . On failure , also free the new key .
2008-02-25 18:27:45 +03:00
*/
2013-03-07 01:53:52 +04:00
int ieee80211_key_link ( struct ieee80211_key * key ,
2022-08-17 12:17:01 +03:00
struct ieee80211_link_data * link ,
2013-03-07 01:53:52 +04:00
struct sta_info * sta ) ;
2019-03-19 23:34:08 +03:00
int ieee80211_set_tx_key ( struct ieee80211_key * key ) ;
2013-03-07 01:58:23 +04:00
void ieee80211_key_free ( struct ieee80211_key * key , bool delay_tailroom ) ;
2013-03-07 01:53:52 +04:00
void ieee80211_key_free_unused ( struct ieee80211_key * key ) ;
2022-08-17 12:17:01 +03:00
void ieee80211_set_default_key ( struct ieee80211_link_data * link , int idx ,
2010-12-09 21:49:02 +03:00
bool uni , bool multi ) ;
2022-08-17 12:17:01 +03:00
void ieee80211_set_default_mgmt_key ( struct ieee80211_link_data * link ,
2009-01-08 14:32:02 +03:00
int idx ) ;
2022-08-17 12:17:01 +03:00
void ieee80211_set_default_beacon_key ( struct ieee80211_link_data * link ,
2020-02-22 16:25:44 +03:00
int idx ) ;
2022-08-17 12:17:01 +03:00
void ieee80211_remove_link_keys ( struct ieee80211_link_data * link ,
struct list_head * keys ) ;
void ieee80211_free_key_list ( struct ieee80211_local * local ,
struct list_head * keys ) ;
2013-12-05 02:47:09 +04:00
void ieee80211_free_keys ( struct ieee80211_sub_if_data * sdata ,
bool force_synchronize ) ;
2013-03-07 02:09:11 +04:00
void ieee80211_free_sta_keys ( struct ieee80211_local * local ,
struct sta_info * sta ) ;
2019-08-30 14:24:49 +03:00
void ieee80211_reenable_keys ( struct ieee80211_sub_if_data * sdata ) ;
2022-09-02 17:12:56 +03:00
int ieee80211_key_switch_links ( struct ieee80211_sub_if_data * sdata ,
unsigned long del_links_mask ,
unsigned long add_links_mask ) ;
2007-08-29 01:01:55 +04:00
2011-05-13 16:15:49 +04:00
/* RCU dereference helpers for pointers protected by local->key_mtx. */
#define key_mtx_dereference(local, ref) \
	rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
#define rcu_dereference_check_key_mtx(local, ref) \
	rcu_dereference_check(ref, lockdep_is_held(&((local)->key_mtx)))
/*
 * Work item applying the deferred decrement of the TX tailroom
 * counter: decrements after key removal are delayed (by half a
 * second) so that remove-then-add key sequences — e.g. while
 * roaming — don't drop the counter to zero, avoiding the costly
 * synchronize_net() otherwise needed on the 0->1 transition.
 */
void ieee80211_delayed_tailroom_dec(struct work_struct *wk);

#endif /* IEEE80211_KEY_H */