/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

/**
 *	struct udp_skb_cb  -  UDP(-Lite) private variables
 *
 *	@header:      private variables used by IPv4/IPv6
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))

/**
 *	struct udp_hslot - UDP hash slot
 *
 *	@head:	head of list of sockets
 *	@count:	number of sockets in 'head' list
 *	@lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));

/**
 *	struct udp_table - UDP table
 *
 *	@hash:	hash table, sockets are hashed on (local port)
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@mask:	number of slots in hash tables, minus 1
 *	@log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);

static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
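
/*
 * A minimal usage sketch (hypothetical helper, not part of this header):
 * walking the sockets bound to one local port. The slot's spinlock must
 * be held, with BHs disabled, while 'head' is traversed or modified;
 * 'count' caches the list length.
 */
static inline int example_port_users(struct net *net, unsigned int port)
{
	struct udp_hslot *hslot = udp_hashslot(&udp_table, net, port);
	struct sock *sk;
	int n = 0;

	spin_lock_bh(&hslot->lock);
	sk_for_each(sk, &hslot->head)
		n++;
	spin_unlock_bh(&hslot->lock);
	return n;	/* should match hslot->count */
}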

/*
 * For the secondary hash, net_hash_mix() is performed before calling
 * udp_hashslot2(); this explains the difference from udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}
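
/*
 * Illustrative sketch of a (local port, local address) hash suitable for
 * udp_hashslot2(), modeled on the IPv4 helper in net/ipv4/udp.c; shown
 * here only to make the net_hash_mix() ordering concrete.
 */
static inline u32 example_portaddr_hash(const struct net *net, __be32 saddr,
					unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}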

extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 *	Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}
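
/*
 * Hypothetical receive-path fragment (a sketch, not the actual code in
 * net/ipv4/udp.c): for plain UDP, cscov equals skb->len and the whole
 * datagram is verified; UDP-Lite may cover only a prefix. Packets whose
 * covered region fails verification are dropped before any payload use.
 */
static inline int example_udp_rcv_checked(struct sk_buff *skb)
{
	if (udp_lib_checksum_complete(skb)) {
		kfree_skb(skb);		/* bad csum over cscov bytes */
		return 0;		/* consumed */
	}
	/* ... deliver to the matching socket ... */
	return 0;
}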

/**
 *	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 *	@sk:	socket we are writing to
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);

	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);
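
/*
 * Sketch of the software checksum path (modeled on udp_set_csum() in
 * net/ipv4/udp.c; the helper name is hypothetical): sum the datagram,
 * fold in the pseudo-header via udp_v4_check(), and map a result of 0
 * to CSUM_MANGLED_0, since 0 means "no checksum" on the wire for UDPv4.
 */
static inline void example_udp_fill_check(struct sk_buff *skb,
					  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	uh->check = 0;
	uh->check = udp_v4_check(len, saddr, daddr,
				 skb_checksum(skb, 0, len, 0));
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;
}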

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	/* For CHECKSUM_NONE, fold the header bytes into skb->csum before
	 * pulling them, so a later full verification still covers them.
	 */
	if (skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(udp_hdr(skb), sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
				     __be16 dport);

struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	/* UDP sockets are hashed when a local port is picked in
	 * udp_lib_get_port(); this generic entry point must not be used.
	 */
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     int (*)(const struct sock *, const struct sock *, bool),
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, caller has indicated an
			 * Ethernet packet so use that to compute a hash.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet, set
			 * to some consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit to
	 * minimize the possibility that any useful information is leaked to
	 * an attacker. Only the upper 16 bits are relevant in the
	 * computation for the 16 bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}
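
/*
 * Typical use (hypothetical helper; UDP tunnels such as vxlan call this
 * to spread encapsulated traffic): derive the outer UDP source port from
 * the inner flow hash, so ECMP/RSS can distinguish flows while keeping
 * each flow on a single path.
 */
static inline __be16 example_tunnel_src_port(struct net *net,
					     struct sk_buff *skb)
{
	/* min == max == 0 selects the netns local ephemeral port range */
	return udp_flow_src_port(net, skb, 0, 0, true);
}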

/* net/ipv4/udp.c */
void udp_v4_early_demux(struct sk_buff *skb);

int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
void udp_err(struct sk_buff *, u32);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_disconnect(struct sock *sk, int flags);
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);

/*
 *	SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); } while (0)

#define __UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while (0)

#define __UDP6_INC_STATS(net, field, is_udplite)	      do { \
	if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field); \
	else		SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field); \
} while (0)

#define UDP6_INC_STATS(net, field, __lite)		      do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while (0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_INC_STATS(sk, field)					\
do {									\
	if ((sk)->sk_family == AF_INET)					\
		__UDP_INC_STATS(sock_net(sk), field, 0);		\
	else								\
		__UDP6_INC_STATS(sock_net(sk), field, 0);		\
} while (0)
#else
#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
#endif
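
/*
 * Usage sketch (hypothetical call site): the plain UDP*_INC_STATS()
 * variants take care of preemption themselves, while the __-prefixed
 * variants (formerly *_BH) expect preemption to be disabled already,
 * as in receive-path softirq context.
 */
static inline void example_count_rx_datagram(struct sock *sk, int is_udplite)
{
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite);
}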

/* /proc */
int udp_seq_open(struct inode *inode, struct file *file);

struct udp_seq_afinfo {
	char				*name;
	sa_family_t			family;
	struct udp_table		*udp_table;
	const struct file_operations	*seq_fops;
	struct seq_operations		seq_ops;
};

struct udp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	int			bucket;
	struct udp_table	*udp_table;
};

#ifdef CONFIG_PROC_FS
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif

int udpv4_offload_init(void);

void udp_init(void);

void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
void udpv6_encap_enable(void);
#endif

#endif	/* _UDP_H */