/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _TCP_AO_H
#define _TCP_AO_H

#define TCP_AO_KEY_ALIGN	1
#define __tcp_ao_key_align __aligned(TCP_AO_KEY_ALIGN)

union tcp_ao_addr {
	struct in_addr	a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr	a6;
#endif
};

struct tcp_ao_hdr {
	u8	kind;
	u8	length;
	u8	keyid;
	u8	rnext_keyid;
};

struct tcp_ao_counters {
	atomic64_t	pkt_good;
	atomic64_t	pkt_bad;
	atomic64_t	key_not_found;
	atomic64_t	ao_required;
	atomic64_t	dropped_icmp;
};

struct tcp_ao_key {
	struct hlist_node	node;
	union tcp_ao_addr	addr;
	u8			key[TCP_AO_MAXKEYLEN] __tcp_ao_key_align;
	unsigned int		tcp_sigpool_id;
	unsigned int		digest_size;
	int			l3index;
	u8			prefixlen;
	u8			family;
	u8			keylen;
	u8			keyflags;
	u8			sndid;
	u8			rcvid;
	u8			maclen;
	struct rcu_head		rcu;
	atomic64_t		pkt_good;
	atomic64_t		pkt_bad;
	u8			traffic_keys[];
};

static inline u8 *rcv_other_key(struct tcp_ao_key *key)
{
	return key->traffic_keys;
}

static inline u8 *snd_other_key(struct tcp_ao_key *key)
{
	return key->traffic_keys + key->digest_size;
}
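
/* Layout sketch for the traffic_keys[] flexible array (illustrative,
 * assuming digest_size == 20, as with HMAC-SHA1):
 *   traffic_keys[0 .. 19]  - receive-direction key, rcv_other_key()
 *   traffic_keys[20 .. 39] - send-direction key, snd_other_key()
 * i.e. two keys of digest_size bytes each, stored back to back.
 */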

static inline int tcp_ao_maclen(const struct tcp_ao_key *key)
{
	return key->maclen;
}

/* Use tcp_ao_len_aligned() for TCP header calculations */
static inline int tcp_ao_len(const struct tcp_ao_key *key)
{
	return tcp_ao_maclen(key) + sizeof(struct tcp_ao_hdr);
}

static inline int tcp_ao_len_aligned(const struct tcp_ao_key *key)
{
	return round_up(tcp_ao_len(key), 4);
}
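
/* Worked example (assuming a 12-byte MAC, as with HMAC-SHA1-96):
 * tcp_ao_len() = 12 + sizeof(struct tcp_ao_hdr) = 12 + 4 = 16, already
 * a multiple of 4, so tcp_ao_len_aligned() is 16 as well. A hypothetical
 * 13-byte MAC would give 17, rounded up to 20 in the TCP options space.
 */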
static inline unsigned int tcp_ao_digest_size(struct tcp_ao_key *key)
{
	return key->digest_size;
}

static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
{
	return sizeof(struct tcp_ao_key) + (key->digest_size << 1);
}
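
/* E.g. with digest_size == 20 (an assumption, it depends on the
 * algorithm), tcp_ao_sizeof_key() is sizeof(struct tcp_ao_key) + 40:
 * the base structure plus the two traffic keys in traffic_keys[].
 */
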
struct tcp_ao_info {
/* List of tcp_ao_key's */
	struct hlist_head	head;
	/* current_key and rnext_key aren't maintained on listen sockets.
	 * Their purpose is to cache keys on established connections,
	 * saving needless lookups. Never dereference any of them from
	 * listen sockets.
	 * ::current_key may change in RX to the key that was requested by
	 * the peer, please use READ_ONCE()/WRITE_ONCE() in order to avoid
	 * load/store tearing.
	 * Do the same for ::rnext_key, if you don't hold the socket lock
	 * (it's changed only by userspace request in setsockopt()).
	 */
	struct tcp_ao_key	*current_key;
	struct tcp_ao_key	*rnext_key;
	struct tcp_ao_counters	counters;
	u32			ao_required	:1,
				accept_icmps	:1,
				__unused	:30;
	__be32			lisn;
	__be32			risn;
	/* The Sequence Number Extension (SNE) is the upper 4 bytes of SEQ,
	 * protecting a TCP-AO connection from replayed old TCP segments.
	 * See RFC5925 (6.2).
	 * In order to get the correct SNE, there's a helper,
	 * tcp_ao_compute_sne(). It needs a SEQ basis to understand where
	 * the lower SEQ numbers are. According to that basis, it can
	 * provide an incremented SNE when SEQ rolls over, or a decremented
	 * SNE when there's a retransmitted segment from before the rollover.
	 * - for request sockets such basis is rcv_isn/snt_isn, which seems
	 *   good enough as it's unexpected to receive 4 Gbytes on a reqsk.
	 * - for full sockets the basis is rcv_nxt/snd_una. snd_una is
	 *   taken instead of snd_nxt as it's currently easier to track
	 *   in tcp_snd_una_update() than to update SNE in every
	 *   WRITE_ONCE(tp->snd_nxt, ...).
	 * - for time-wait sockets the basis is tw_rcv_nxt/tw_snd_nxt.
	 *   tw_snd_nxt is not expected to change, while tw_rcv_nxt may.
	 */
	u32			snd_sne;
	u32			rcv_sne;
	refcount_t		refcnt;	/* Protects twsk destruction */
	struct rcu_head		rcu;
};

#ifdef CONFIG_TCP_MD5SIG
#include <linux/jump_label.h>
extern struct static_key_false_deferred tcp_md5_needed;
#define static_branch_tcp_md5()	static_branch_unlikely(&tcp_md5_needed.key)
#else
#define static_branch_tcp_md5()	false
#endif

#ifdef CONFIG_TCP_AO
/* TCP-AO structures and functions */
#include <linux/jump_label.h>
extern struct static_key_false_deferred tcp_ao_needed;
#define static_branch_tcp_ao()	static_branch_unlikely(&tcp_ao_needed.key)
#else
#define static_branch_tcp_ao()	false
#endif
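
/* Hash-failure warnings are only worth emitting when at least one of
 * TCP-MD5 or TCP-AO is actually in use, i.e. once the corresponding
 * deferred static key has been enabled.
 */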
static inline bool tcp_hash_should_produce_warnings(void)
{
	return static_branch_tcp_md5() || static_branch_tcp_ao();
}

#define tcp_hash_fail(msg, family, skb, fmt, ...)			\
do {									\
	const struct tcphdr *th = tcp_hdr(skb);				\
	char hdr_flags[6];						\
	char *f = hdr_flags;						\
									\
	if (!tcp_hash_should_produce_warnings())			\
		break;							\
	if (th->fin)							\
		*f++ = 'F';						\
	if (th->syn)							\
		*f++ = 'S';						\
	if (th->rst)							\
		*f++ = 'R';						\
	if (th->psh)							\
		*f++ = 'P';						\
	if (th->ack)							\
		*f++ = '.';						\
	*f = 0;								\
	if ((family) == AF_INET) {					\
		net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \
				msg, &ip_hdr(skb)->saddr, ntohs(th->source), \
				&ip_hdr(skb)->daddr, ntohs(th->dest),	\
				hdr_flags, ##__VA_ARGS__);		\
	} else {							\
		net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s] " fmt "\n", \
				msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \
				&ipv6_hdr(skb)->daddr, ntohs(th->dest),	\
				hdr_flags, ##__VA_ARGS__);		\
	}								\
} while (0)
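
/* A sketch of a call site (the message and format arguments here are
 * hypothetical):
 *
 *	tcp_hash_fail("AO hash wrong length", family, skb,
 *		      "%u != %d", maclen, tcp_ao_maclen(key));
 *
 * which emits one ratelimited line carrying addresses, ports and TCP
 * flags, e.g.:
 * "AO hash wrong length for 10.0.0.1.4001->10.0.0.2.179 [S.] 16 != 12".
 */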

#ifdef CONFIG_TCP_AO
/* TCP-AO structures and functions */
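
/* The two context structures below gather the connection identifiers
 * (addresses, ports and both ISNs) that serve as input when deriving
 * traffic keys from a Master Key Tuple; see tcp_ao_calc_traffic_key()
 * below and RFC5926 for the KDF inputs.
 */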
struct tcp4_ao_context {
	__be32		saddr;
	__be32		daddr;
	__be16		sport;
	__be16		dport;
	__be32		sisn;
	__be32		disn;
};

struct tcp6_ao_context {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be16		sport;
	__be16		dport;
	__be32		sisn;
	__be32		disn;
};

struct tcp_sigpool;

#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
			    TCPF_CLOSE | TCPF_CLOSE_WAIT | \
			    TCPF_LAST_ACK | TCPF_CLOSING)
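
/* A sketch of the intended use (TCPF_* are the (1 << TCP_*) state bits):
 *
 *	if ((1 << sk->sk_state) & TCP_AO_ESTABLISHED)
 *		... the connection counts as synchronized ...
 */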

int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
			struct tcp_ao_key *key, struct tcphdr *th,
			__u8 *hash_location);
int tcp_ao_hash_skb(unsigned short int family,
		    char *ao_hash, struct tcp_ao_key *key,
		    const struct sock *sk, const struct sk_buff *skb,
		    const u8 *tkey, int hash_offset, u32 sne);
int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
		 sockptr_t optval, int optlen);
struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
					  int sndid, int rcvid);
int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
			     struct request_sock *req, struct sk_buff *skb,
			     int family);
int tcp_ao_calc_traffic_key(struct tcp_ao_key *mkt, u8 *key, void *ctx,
			    unsigned int len, struct tcp_sigpool *hp);
void tcp_ao_destroy_sock(struct sock *sk, bool twsk);
void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp);
bool tcp_ao_ignore_icmp(const struct sock *sk, int family, int type, int code);
int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen);
int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen);
int tcp_ao_get_repair(struct sock *sk, sockptr_t optval, sockptr_t optlen);
int tcp_ao_set_repair(struct sock *sk, sockptr_t optval, unsigned int optlen);
enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk,
			const struct sk_buff *skb, unsigned short int family,
			const struct request_sock *req, int l3index,
			const struct tcp_ao_hdr *aoh);
u32 tcp_ao_compute_sne(u32 next_sne, u32 next_seq, u32 seq);
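
/* Illustrative values for the helper above, following the SNE comment
 * in struct tcp_ao_info: with next_sne = 5 and next_seq = 0x00000100,
 * - seq = 0x00000200 is in the same SEQ epoch, so the result stays 5;
 * - seq = 0xfffffff0 is from before the rollover, so the result is 4;
 * while with next_seq = 0xfffffff0, seq = 0x00000010 has already rolled
 * over and the result is 6.
 */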
struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, int l3index,
				    const union tcp_ao_addr *addr,
				    int family, int sndid, int rcvid);
int tcp_ao_hash_hdr(unsigned short family, char *ao_hash,
		    struct tcp_ao_key *key, const u8 *tkey,
		    const union tcp_ao_addr *daddr,
		    const union tcp_ao_addr *saddr,
		    const struct tcphdr *th, u32 sne);
int tcp_ao_prepare_reset(const struct sock *sk, struct sk_buff *skb,
			 const struct tcp_ao_hdr *aoh, int l3index, u32 seq,
			 struct tcp_ao_key **key, char **traffic_key,
			 bool *allocated_traffic_key, u8 *keyid, u32 *sne);
/* ipv4 specific functions */
int tcp_v4_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen);
struct tcp_ao_key *tcp_v4_ao_lookup(const struct sock *sk, struct sock *addr_sk,
				    int sndid, int rcvid);
int tcp_v4_ao_synack_hash(char *ao_hash, struct tcp_ao_key *mkt,
			  struct request_sock *req, const struct sk_buff *skb,
			  int hash_offset, u32 sne);
int tcp_v4_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
			  const struct sock *sk,
			  __be32 sisn, __be32 disn, bool send);
int tcp_v4_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key,
			   struct request_sock *req);
struct tcp_ao_key *tcp_v4_ao_lookup_rsk(const struct sock *sk,
					struct request_sock *req,
					int sndid, int rcvid);
int tcp_v4_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key,
		       const struct sock *sk, const struct sk_buff *skb,
		       const u8 *tkey, int hash_offset, u32 sne);
/* ipv6 specific functions */
int tcp_v6_ao_hash_pseudoheader(struct tcp_sigpool *hp,
				const struct in6_addr *daddr,
				const struct in6_addr *saddr, int nbytes);
int tcp_v6_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key,
			   const struct sk_buff *skb, __be32 sisn, __be32 disn);
int tcp_v6_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
			  const struct sock *sk, __be32 sisn,
			  __be32 disn, bool send);
int tcp_v6_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key,
			   struct request_sock *req);
struct tcp_ao_key *tcp_v6_ao_lookup(const struct sock *sk,
				    struct sock *addr_sk, int sndid, int rcvid);
struct tcp_ao_key *tcp_v6_ao_lookup_rsk(const struct sock *sk,
					struct request_sock *req,
					int sndid, int rcvid);
int tcp_v6_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key,
		       const struct sock *sk, const struct sk_buff *skb,
		       const u8 *tkey, int hash_offset, u32 sne);
int tcp_v6_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen);
int tcp_v6_ao_synack_hash(char *ao_hash, struct tcp_ao_key *ao_key,
			  struct request_sock *req, const struct sk_buff *skb,
			  int hash_offset, u32 sne);
void tcp_ao_established(struct sock *sk);
void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb);
void tcp_ao_connect_init(struct sock *sk);
void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
		      struct request_sock *req, unsigned short int family);

#else /* CONFIG_TCP_AO */

static inline int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
				      struct tcp_ao_key *key, struct tcphdr *th,
				      __u8 *hash_location)
{
	return 0;
}

static inline void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
				    struct request_sock *req,
				    unsigned short int family)
{
}

static inline bool tcp_ao_ignore_icmp(const struct sock *sk, int family,
				      int type, int code)
{
	return false;
}

static inline enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk,
		const struct sk_buff *skb, unsigned short int family,
		const struct request_sock *req, int l3index,
		const struct tcp_ao_hdr *aoh)
{
	return SKB_NOT_DROPPED_YET;
}

static inline struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk,
		int l3index, const union tcp_ao_addr *addr,
		int family, int sndid, int rcvid)
{
	return NULL;
}

static inline void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
{
}

static inline void tcp_ao_established(struct sock *sk)
{
}

static inline void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
{
}

static inline void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw,
				    struct tcp_sock *tp)
{
}

static inline void tcp_ao_connect_init(struct sock *sk)
{
}

static inline int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval,
				  sockptr_t optlen)
{
	return -ENOPROTOOPT;
}

static inline int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval,
				       sockptr_t optlen)
{
	return -ENOPROTOOPT;
}

static inline int tcp_ao_get_repair(struct sock *sk,
				    sockptr_t optval, sockptr_t optlen)
{
	return -ENOPROTOOPT;
}

static inline int tcp_ao_set_repair(struct sock *sk,
				    sockptr_t optval, unsigned int optlen)
{
	return -ENOPROTOOPT;
}

#endif /* CONFIG_TCP_AO */

#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
int tcp_do_parse_auth_options(const struct tcphdr *th,
			      const u8 **md5_hash, const u8 **ao_hash);
#else
static inline int tcp_do_parse_auth_options(const struct tcphdr *th,
		const u8 **md5_hash, const u8 **ao_hash)
{
	*md5_hash = NULL;
	*ao_hash = NULL;
	return 0;
}
#endif
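
/* A sketch of a caller (the locals are hypothetical); each pointer comes
 * back NULL when the corresponding option is absent, and a non-zero
 * return indicates a malformed header (e.g. both options present), which
 * callers are expected to treat as a reason to drop the segment:
 *
 *	const u8 *md5_location, *ao_location;
 *
 *	if (tcp_do_parse_auth_options(tcp_hdr(skb), &md5_location, &ao_location))
 *		goto drop;
 */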

#endif /* _TCP_AO_H */