2019-05-27 09:55:01 +03:00
// SPDX-License-Identifier: GPL-2.0-or-later
2016-05-09 01:55:48 +03:00
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
*
* ( C ) 2012 - 2014 by sysmocom - s . f . m . c . GmbH
* ( C ) 2016 by Pablo Neira Ayuso < pablo @ netfilter . org >
*
* Author : Harald Welte < hwelte @ sysmocom . de >
* Pablo Neira Ayuso < pablo @ netfilter . org >
* Andreas Schultz < aschultz @ travelping . com >
*/
# define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
# include <linux/module.h>
# include <linux/skbuff.h>
# include <linux/udp.h>
# include <linux/rculist.h>
# include <linux/jhash.h>
# include <linux/if_tunnel.h>
# include <linux/net.h>
# include <linux/file.h>
# include <linux/gtp.h>
# include <net/net_namespace.h>
# include <net/protocol.h>
# include <net/ip.h>
# include <net/udp.h>
# include <net/udp_tunnel.h>
# include <net/icmp.h>
# include <net/xfrm.h>
# include <net/genetlink.h>
# include <net/netns/generic.h>
# include <net/gtp.h>
/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;	/* entry in gtp_dev::tid_hash */
	struct hlist_node	hlist_addr;	/* entry in gtp_dev::addr_hash */

	/* Tunnel identifiers, keyed by GTP version. */
	union {
		struct {
			u64	tid;	/* 64-bit tunnel identifier */
			u16	flow;	/* flow label */
		} v0;
		struct {
			u32	i_tei;	/* ingress tunnel endpoint id */
			u32	o_tei;	/* egress tunnel endpoint id */
		} v1;
	} u;
	u8			gtp_version;	/* GTP_V0 or GTP_V1 */
	u16			af;		/* MS address family (AF_INET) */

	struct in_addr		ms_addr_ip4;	/* mobile subscriber address */
	struct in_addr		peer_addr_ip4;	/* remote GSN address */

	struct sock		*sk;	/* UDP encap socket for this context */
	struct net_device	*dev;	/* gtp device owning this context */

	atomic_t		tx_seq;		/* transmit sequence counter */
	struct rcu_head		rcu_head;	/* deferred free */
};
/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;	/* entry in per-netns gtp_dev_list */

	struct sock		*sk0;	/* GTPv0 UDP encap socket */
	struct sock		*sk1u;	/* GTPv1-U UDP encap socket */
	u8			sk_created;	/* sockets created in-kernel,
						 * i.e. no userspace daemon
						 */

	struct net_device	*dev;	/* backpointer to the netdevice */
	struct net		*net;	/* netns the device was created in */

	unsigned int		role;	/* GTP_ROLE_GGSN or GTP_ROLE_SGSN */
	unsigned int		hash_size;	/* buckets in both tables */
	struct hlist_head	*tid_hash;	/* pdp_ctx by TID/TEI */
	struct hlist_head	*addr_hash;	/* pdp_ctx by MS address */

	u8			restart_count;	/* value for Recovery IE */
};
2022-03-04 19:40:44 +03:00
/* Parameters of a received echo message, reported to userspace
 * via the generic netlink multicast group.
 */
struct echo_info {
	struct in_addr	ms_addr_ip4;	/* our side of the path */
	struct in_addr	peer_addr_ip4;	/* sender of the echo */
	u8		gtp_version;	/* GTP_V0 or GTP_V1 */
};
netns: make struct pernet_operations::id unsigned int
Make struct pernet_operations::id unsigned.
There are 2 reasons to do so:
1)
This field is really an index into an zero based array and
thus is unsigned entity. Using negative value is out-of-bound
access by definition.
2)
On x86_64 unsigned 32-bit data which are mixed with pointers
via array indexing or offsets added or subtracted to pointers
are preffered to signed 32-bit data.
"int" being used as an array index needs to be sign-extended
to 64-bit before being used.
void f(long *p, int i)
{
g(p[i]);
}
roughly translates to
movsx rsi, esi
mov rdi, [rsi+...]
call g
MOVSX is 3 byte instruction which isn't necessary if the variable is
unsigned because x86_64 is zero extending by default.
Now, there is net_generic() function which, you guessed it right, uses
"int" as an array index:
static inline void *net_generic(const struct net *net, int id)
{
...
ptr = ng->ptr[id - 1];
...
}
And this function is used a lot, so those sign extensions add up.
Patch snipes ~1730 bytes on allyesconfig kernel (without all junk
messing with code generation):
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
Unfortunately some functions actually grow bigger.
This is a semmingly random artefact of code generation with register
allocator being used differently. gcc decides that some variable
needs to live in new r8+ registers and every access now requires REX
prefix. Or it is shifted into r12, so [r12+0] addressing mode has to be
used which is longer than [r8]
However, overall balance is in negative direction:
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
function old new delta
nfsd4_lock 3886 3959 +73
tipc_link_build_proto_msg 1096 1140 +44
mac80211_hwsim_new_radio 2776 2808 +32
tipc_mon_rcv 1032 1058 +26
svcauth_gss_legacy_init 1413 1429 +16
tipc_bcbase_select_primary 379 392 +13
nfsd4_exchange_id 1247 1260 +13
nfsd4_setclientid_confirm 782 793 +11
...
put_client_renew_locked 494 480 -14
ip_set_sockfn_get 730 716 -14
geneve_sock_add 829 813 -16
nfsd4_sequence_done 721 703 -18
nlmclnt_lookup_host 708 686 -22
nfsd4_lockt 1085 1063 -22
nfs_get_client 1077 1050 -27
tcf_bpf_init 1106 1076 -30
nfsd4_encode_fattr 5997 5930 -67
Total: Before=154856051, After=154854321, chg -0.00%
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-11-17 04:58:21 +03:00
/* pernet_operations id for per-namespace GTP state. */
static unsigned int gtp_net_id __read_mostly;

/* Per-netns state: all GTP devices living in this namespace. */
struct gtp_net {
	struct list_head gtp_dev_list;
};

/* Random seed for the jhash-based PDP context lookups. */
static u32 gtp_h_initval;

static struct genl_family gtp_genl_family;

enum gtp_multicast_groups {
	GTP_GENL_MCGRP,
};

static const struct genl_multicast_group gtp_genl_mcgrps[] = {
	[GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
};

static void pdp_context_delete(struct pdp_ctx *pctx);
2016-05-09 01:55:48 +03:00
/* Hash a GTPv0 64-bit TID into the tid_hash table seed space. */
static inline u32 gtp0_hashfn(u64 tid)
{
	u32 halves[2];

	/* Split the 64-bit TID into its two native-endian 32-bit halves. */
	memcpy(halves, &tid, sizeof(halves));
	return jhash_2words(halves[0], halves[1], gtp_h_initval);
}
/* Hash a GTPv1-U 32-bit TEID into the tid_hash table seed space. */
static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}
/* Hash an IPv4 address (network byte order) for the addr_hash table. */
static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}
/* Resolve a PDP context structure based on the 64bit TID.
 * Caller must hold the RCU read lock. Returns NULL when no match.
 */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
	struct hlist_head *bucket;
	struct pdp_ctx *pctx;

	bucket = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pctx, bucket, hlist_tid) {
		if (pctx->gtp_version == GTP_V0 && pctx->u.v0.tid == tid)
			return pctx;
	}

	return NULL;
}
/* Resolve a PDP context structure based on the 32bit TEI.
 * Caller must hold the RCU read lock. Returns NULL when no match.
 */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
	struct hlist_head *bucket;
	struct pdp_ctx *pctx;

	bucket = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pctx, bucket, hlist_tid) {
		if (pctx->gtp_version == GTP_V1 && pctx->u.v1.i_tei == tid)
			return pctx;
	}

	return NULL;
}
/* Resolve a PDP context based on IPv4 address of MS.
 * Caller must hold the RCU read lock. Returns NULL when no match.
 */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *bucket;
	struct pdp_ctx *pctx;

	bucket = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pctx, bucket, hlist_addr) {
		if (pctx->af == AF_INET &&
		    pctx->ms_addr_ip4.s_addr == ms_addr)
			return pctx;
	}

	return NULL;
}
2017-03-25 01:23:21 +03:00
static bool gtp_check_ms_ipv4 ( struct sk_buff * skb , struct pdp_ctx * pctx ,
unsigned int hdrlen , unsigned int role )
2016-05-09 01:55:48 +03:00
{
struct iphdr * iph ;
if ( ! pskb_may_pull ( skb , hdrlen + sizeof ( struct iphdr ) ) )
return false ;
2016-12-16 00:35:52 +03:00
iph = ( struct iphdr * ) ( skb - > data + hdrlen ) ;
2016-05-09 01:55:48 +03:00
2017-03-25 01:23:21 +03:00
if ( role = = GTP_ROLE_SGSN )
return iph - > daddr = = pctx - > ms_addr_ip4 . s_addr ;
else
return iph - > saddr = = pctx - > ms_addr_ip4 . s_addr ;
2016-05-09 01:55:48 +03:00
}
2017-03-25 01:23:21 +03:00
/* Check if the inner IP address in this packet is assigned to any
2016-05-09 01:55:48 +03:00
* existing mobile subscriber .
*/
2017-03-25 01:23:21 +03:00
static bool gtp_check_ms ( struct sk_buff * skb , struct pdp_ctx * pctx ,
unsigned int hdrlen , unsigned int role )
2016-05-09 01:55:48 +03:00
{
switch ( ntohs ( skb - > protocol ) ) {
case ETH_P_IP :
2017-03-25 01:23:21 +03:00
return gtp_check_ms_ipv4 ( skb , pctx , hdrlen , role ) ;
2016-05-09 01:55:48 +03:00
}
return false ;
}
2021-02-03 10:07:59 +03:00
static int gtp_rx ( struct pdp_ctx * pctx , struct sk_buff * skb ,
unsigned int hdrlen , unsigned int role )
2017-03-09 19:42:59 +03:00
{
2021-02-03 10:07:59 +03:00
if ( ! gtp_check_ms ( skb , pctx , hdrlen , role ) ) {
netdev_dbg ( pctx - > dev , " No PDP ctx for this MS \n " ) ;
return 1 ;
2017-03-09 19:42:59 +03:00
}
/* Get rid of the GTP + UDP headers. */
2017-03-09 19:43:02 +03:00
if ( iptunnel_pull_header ( skb , hdrlen , skb - > protocol ,
2021-02-03 10:08:05 +03:00
! net_eq ( sock_net ( pctx - > sk ) , dev_net ( pctx - > dev ) ) ) ) {
pctx - > dev - > stats . rx_length_errors + + ;
goto err ;
}
2021-01-10 10:00:21 +03:00
2021-02-03 10:07:59 +03:00
netdev_dbg ( pctx - > dev , " forwarding packet from GGSN to uplink \n " ) ;
2017-03-09 19:42:59 +03:00
/* Now that the UDP and the GTP header have been removed, set up the
* new network header . This is required by the upper layer to
* calculate the transport header .
*/
skb_reset_network_header ( skb ) ;
2021-06-25 16:33:23 +03:00
skb_reset_mac_header ( skb ) ;
2017-03-09 19:42:59 +03:00
2021-02-03 10:07:59 +03:00
skb - > dev = pctx - > dev ;
dev_sw_netstats_rx_add ( pctx - > dev , skb - > len ) ;
2022-02-12 02:38:38 +03:00
__netif_rx ( skb ) ;
2017-03-09 19:42:59 +03:00
return 0 ;
2021-02-03 10:08:05 +03:00
err :
pctx - > dev - > stats . rx_dropped + + ;
return - 1 ;
2017-03-09 19:42:59 +03:00
}
2022-03-04 19:40:43 +03:00
/* Fill @fl4 from @sk and resolve a route to @daddr. The flow key is
 * also returned to the caller for use in the UDP transmit path.
 */
static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
					   const struct sock *sk,
					   __be32 daddr, __be32 saddr)
{
	*fl4 = (struct flowi4) {
		.flowi4_oif	= sk->sk_bound_dev_if,
		.daddr		= daddr,
		.saddr		= saddr,
		.flowi4_tos	= RT_CONN_FLAGS(sk),
		.flowi4_proto	= sk->sk_protocol,
	};

	return ip_route_output_key(sock_net(sk), fl4);
}
/* GSM TS 09.60. 7.3
* In all Path Management messages :
* - TID : is not used and shall be set to 0.
* - Flow Label is not used and shall be set to 0
* In signalling messages :
* - number : this field is not yet used in signalling messages .
* It shall be set to 255 by the sender and shall be ignored
* by the receiver
* Returns true if the echo req was correct , false otherwise .
*/
2022-03-04 19:40:44 +03:00
static bool gtp0_validate_echo_hdr ( struct gtp0_header * gtp0 )
2022-03-04 19:40:43 +03:00
{
return ! ( gtp0 - > tid | | ( gtp0 - > flags ^ 0x1e ) | |
gtp0 - > number ! = 0xff | | gtp0 - > flow ) ;
}
2022-03-04 19:40:44 +03:00
/* Fill a GTPv0 path-management header for an echo message.
 * msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP.
 */
static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
{
	hdr->flags = 0x1e; /* v0, GTP-non-prime. */
	hdr->type = msg_type;

	/* GSM TS 09.60. 7.3 In all Path Management Flow Label and TID
	 * are not used and shall be set to 0.
	 */
	hdr->flow = 0;
	hdr->tid = 0;

	hdr->number = 0xff;
	hdr->spare[0] = 0xff;
	hdr->spare[1] = 0xff;
	hdr->spare[2] = 0xff;

	/* Only the response carries a payload (the Recovery IE). */
	if (msg_type == GTP_ECHO_RSP)
		hdr->length = htons(sizeof(struct gtp0_packet) -
				    sizeof(struct gtp0_header));
	else
		hdr->length = 0;
}
2022-03-04 19:40:43 +03:00
static int gtp0_send_echo_resp ( struct gtp_dev * gtp , struct sk_buff * skb )
{
struct gtp0_packet * gtp_pkt ;
struct gtp0_header * gtp0 ;
struct rtable * rt ;
struct flowi4 fl4 ;
struct iphdr * iph ;
__be16 seq ;
gtp0 = ( struct gtp0_header * ) ( skb - > data + sizeof ( struct udphdr ) ) ;
2022-03-04 19:40:44 +03:00
if ( ! gtp0_validate_echo_hdr ( gtp0 ) )
2022-03-04 19:40:43 +03:00
return - 1 ;
seq = gtp0 - > seq ;
/* pull GTP and UDP headers */
skb_pull_data ( skb , sizeof ( struct gtp0_header ) + sizeof ( struct udphdr ) ) ;
gtp_pkt = skb_push ( skb , sizeof ( struct gtp0_packet ) ) ;
memset ( gtp_pkt , 0 , sizeof ( struct gtp0_packet ) ) ;
2022-03-04 19:40:44 +03:00
gtp0_build_echo_msg ( & gtp_pkt - > gtp0_h , GTP_ECHO_RSP ) ;
2022-03-04 19:40:43 +03:00
/* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
* message shall be copied from the signalling request message
* that the GSN is replying to .
*/
gtp_pkt - > gtp0_h . seq = seq ;
gtp_pkt - > ie . tag = GTPIE_RECOVERY ;
gtp_pkt - > ie . val = gtp - > restart_count ;
iph = ip_hdr ( skb ) ;
/* find route to the sender,
* src address becomes dst address and vice versa .
*/
rt = ip4_route_output_gtp ( & fl4 , gtp - > sk0 , iph - > saddr , iph - > daddr ) ;
if ( IS_ERR ( rt ) ) {
netdev_dbg ( gtp - > dev , " no route for echo response from %pI4 \n " ,
& iph - > saddr ) ;
return - 1 ;
}
udp_tunnel_xmit_skb ( rt , gtp - > sk0 , skb ,
fl4 . saddr , fl4 . daddr ,
iph - > tos ,
ip4_dst_hoplimit ( & rt - > dst ) ,
0 ,
htons ( GTP0_PORT ) , htons ( GTP0_PORT ) ,
! net_eq ( sock_net ( gtp - > sk1u ) ,
dev_net ( gtp - > dev ) ) ,
false ) ;
return 0 ;
}
2022-03-04 19:40:44 +03:00
/* Build a generic netlink echo notification message into @skb.
 * Returns 0 on success or -EMSGSIZE when the message does not fit.
 */
static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct echo_info echo)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (!genlh)
		return -EMSGSIZE;

	if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr)) {
		genlmsg_cancel(skb, genlh);
		return -EMSGSIZE;
	}

	genlmsg_end(skb, genlh);
	return 0;
}
static int gtp0_handle_echo_resp ( struct gtp_dev * gtp , struct sk_buff * skb )
{
struct gtp0_header * gtp0 ;
struct echo_info echo ;
struct sk_buff * msg ;
struct iphdr * iph ;
int ret ;
gtp0 = ( struct gtp0_header * ) ( skb - > data + sizeof ( struct udphdr ) ) ;
if ( ! gtp0_validate_echo_hdr ( gtp0 ) )
return - 1 ;
iph = ip_hdr ( skb ) ;
echo . ms_addr_ip4 . s_addr = iph - > daddr ;
echo . peer_addr_ip4 . s_addr = iph - > saddr ;
echo . gtp_version = GTP_V0 ;
msg = nlmsg_new ( NLMSG_DEFAULT_SIZE , GFP_ATOMIC ) ;
if ( ! msg )
return - ENOMEM ;
ret = gtp_genl_fill_echo ( msg , 0 , 0 , 0 , GTP_CMD_ECHOREQ , echo ) ;
if ( ret < 0 ) {
nlmsg_free ( msg ) ;
return ret ;
}
return genlmsg_multicast_netns ( & gtp_genl_family , dev_net ( gtp - > dev ) ,
msg , 0 , GTP_GENL_MCGRP , GFP_ATOMIC ) ;
}
2016-05-09 01:55:48 +03:00
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
2017-03-09 19:43:02 +03:00
static int gtp0_udp_encap_recv ( struct gtp_dev * gtp , struct sk_buff * skb )
2016-05-09 01:55:48 +03:00
{
unsigned int hdrlen = sizeof ( struct udphdr ) +
sizeof ( struct gtp0_header ) ;
struct gtp0_header * gtp0 ;
2021-02-03 10:07:59 +03:00
struct pdp_ctx * pctx ;
2016-05-09 01:55:48 +03:00
if ( ! pskb_may_pull ( skb , hdrlen ) )
return - 1 ;
gtp0 = ( struct gtp0_header * ) ( skb - > data + sizeof ( struct udphdr ) ) ;
if ( ( gtp0 - > flags > > 5 ) ! = GTP_V0 )
return 1 ;
2022-03-04 19:40:43 +03:00
/* If the sockets were created in kernel, it means that
* there is no daemon running in userspace which would
* handle echo request .
*/
if ( gtp0 - > type = = GTP_ECHO_REQ & & gtp - > sk_created )
return gtp0_send_echo_resp ( gtp , skb ) ;
2022-03-04 19:40:44 +03:00
if ( gtp0 - > type = = GTP_ECHO_RSP & & gtp - > sk_created )
return gtp0_handle_echo_resp ( gtp , skb ) ;
2021-02-03 10:07:59 +03:00
if ( gtp0 - > type ! = GTP_TPDU )
return 1 ;
pctx = gtp0_pdp_find ( gtp , be64_to_cpu ( gtp0 - > tid ) ) ;
if ( ! pctx ) {
netdev_dbg ( gtp - > dev , " No PDP ctx to decap skb=%p \n " , skb ) ;
return 1 ;
}
return gtp_rx ( pctx , skb , hdrlen , gtp - > role ) ;
2016-05-09 01:55:48 +03:00
}
2022-03-04 19:40:44 +03:00
/* Fill a GTPv1-U header for an echo message.
 * msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP.
 */
static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
{
	int len_pkt;

	/* S flag must be set to 1 */
	hdr->flags = 0x32; /* v1, GTP-non-prime. */
	hdr->type = msg_type;

	/* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
	hdr->tid = 0;

	if (msg_type == GTP_ECHO_RSP) {
		/* The response carries a Recovery IE. */
		len_pkt = sizeof(struct gtp1u_packet);
	} else {
		/* GTP_ECHO_REQ does not carry a GTP Information Element,
		 * which is why gtp1_header_long alone is enough here.
		 */
		len_pkt = sizeof(struct gtp1_header_long);
	}

	/* seq, npdu and next are counted towards the length of the GTP
	 * packet; that is why the size of gtp1_header is subtracted,
	 * not the size of gtp1_header_long.
	 */
	hdr->length = htons(len_pkt - (int)sizeof(struct gtp1_header));
}
2022-03-04 19:40:43 +03:00
static int gtp1u_send_echo_resp ( struct gtp_dev * gtp , struct sk_buff * skb )
{
struct gtp1_header_long * gtp1u ;
struct gtp1u_packet * gtp_pkt ;
struct rtable * rt ;
struct flowi4 fl4 ;
struct iphdr * iph ;
gtp1u = ( struct gtp1_header_long * ) ( skb - > data + sizeof ( struct udphdr ) ) ;
/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
* Error Indication and Supported Extension Headers Notification
* messages , the S flag shall be set to 1 and TEID shall be set to 0.
*/
if ( ! ( gtp1u - > flags & GTP1_F_SEQ ) | | gtp1u - > tid )
return - 1 ;
/* pull GTP and UDP headers */
skb_pull_data ( skb ,
sizeof ( struct gtp1_header_long ) + sizeof ( struct udphdr ) ) ;
gtp_pkt = skb_push ( skb , sizeof ( struct gtp1u_packet ) ) ;
memset ( gtp_pkt , 0 , sizeof ( struct gtp1u_packet ) ) ;
2022-03-04 19:40:44 +03:00
gtp1u_build_echo_msg ( & gtp_pkt - > gtp1u_h , GTP_ECHO_RSP ) ;
2022-03-04 19:40:43 +03:00
/* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
* Recovery information element shall not be used , i . e . it shall
* be set to zero by the sender and shall be ignored by the receiver .
* The Recovery information element is mandatory due to backwards
* compatibility reasons .
*/
gtp_pkt - > ie . tag = GTPIE_RECOVERY ;
gtp_pkt - > ie . val = 0 ;
iph = ip_hdr ( skb ) ;
/* find route to the sender,
* src address becomes dst address and vice versa .
*/
rt = ip4_route_output_gtp ( & fl4 , gtp - > sk1u , iph - > saddr , iph - > daddr ) ;
if ( IS_ERR ( rt ) ) {
netdev_dbg ( gtp - > dev , " no route for echo response from %pI4 \n " ,
& iph - > saddr ) ;
return - 1 ;
}
udp_tunnel_xmit_skb ( rt , gtp - > sk1u , skb ,
fl4 . saddr , fl4 . daddr ,
iph - > tos ,
ip4_dst_hoplimit ( & rt - > dst ) ,
0 ,
htons ( GTP1U_PORT ) , htons ( GTP1U_PORT ) ,
! net_eq ( sock_net ( gtp - > sk1u ) ,
dev_net ( gtp - > dev ) ) ,
false ) ;
return 0 ;
}
2022-03-04 19:40:44 +03:00
static int gtp1u_handle_echo_resp ( struct gtp_dev * gtp , struct sk_buff * skb )
{
struct gtp1_header_long * gtp1u ;
struct echo_info echo ;
struct sk_buff * msg ;
struct iphdr * iph ;
int ret ;
gtp1u = ( struct gtp1_header_long * ) ( skb - > data + sizeof ( struct udphdr ) ) ;
/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
* Error Indication and Supported Extension Headers Notification
* messages , the S flag shall be set to 1 and TEID shall be set to 0.
*/
if ( ! ( gtp1u - > flags & GTP1_F_SEQ ) | | gtp1u - > tid )
return - 1 ;
iph = ip_hdr ( skb ) ;
echo . ms_addr_ip4 . s_addr = iph - > daddr ;
echo . peer_addr_ip4 . s_addr = iph - > saddr ;
echo . gtp_version = GTP_V1 ;
msg = nlmsg_new ( NLMSG_DEFAULT_SIZE , GFP_ATOMIC ) ;
if ( ! msg )
return - ENOMEM ;
ret = gtp_genl_fill_echo ( msg , 0 , 0 , 0 , GTP_CMD_ECHOREQ , echo ) ;
if ( ret < 0 ) {
nlmsg_free ( msg ) ;
return ret ;
}
return genlmsg_multicast_netns ( & gtp_genl_family , dev_net ( gtp - > dev ) ,
msg , 0 , GTP_GENL_MCGRP , GFP_ATOMIC ) ;
}
2017-03-09 19:43:02 +03:00
static int gtp1u_udp_encap_recv ( struct gtp_dev * gtp , struct sk_buff * skb )
2016-05-09 01:55:48 +03:00
{
unsigned int hdrlen = sizeof ( struct udphdr ) +
sizeof ( struct gtp1_header ) ;
struct gtp1_header * gtp1 ;
2021-02-03 10:07:59 +03:00
struct pdp_ctx * pctx ;
2016-05-09 01:55:48 +03:00
if ( ! pskb_may_pull ( skb , hdrlen ) )
return - 1 ;
gtp1 = ( struct gtp1_header * ) ( skb - > data + sizeof ( struct udphdr ) ) ;
if ( ( gtp1 - > flags > > 5 ) ! = GTP_V1 )
return 1 ;
2022-03-04 19:40:43 +03:00
/* If the sockets were created in kernel, it means that
* there is no daemon running in userspace which would
* handle echo request .
*/
if ( gtp1 - > type = = GTP_ECHO_REQ & & gtp - > sk_created )
return gtp1u_send_echo_resp ( gtp , skb ) ;
2022-03-04 19:40:44 +03:00
if ( gtp1 - > type = = GTP_ECHO_RSP & & gtp - > sk_created )
return gtp1u_handle_echo_resp ( gtp , skb ) ;
2021-02-03 10:07:59 +03:00
if ( gtp1 - > type ! = GTP_TPDU )
return 1 ;
2016-05-09 01:55:48 +03:00
/* From 29.060: "This field shall be present if and only if any one or
* more of the S , PN and E flags are set . " .
*
* If any of the bit is set , then the remaining ones also have to be
* set .
*/
2021-02-03 10:07:59 +03:00
if ( gtp1 - > flags & GTP1_F_MASK )
hdrlen + = 4 ;
2016-05-09 01:55:48 +03:00
/* Make sure the header is larger enough, including extensions. */
if ( ! pskb_may_pull ( skb , hdrlen ) )
return - 1 ;
2016-05-10 22:33:38 +03:00
gtp1 = ( struct gtp1_header * ) ( skb - > data + sizeof ( struct udphdr ) ) ;
2021-02-03 10:07:59 +03:00
pctx = gtp1_pdp_find ( gtp , ntohl ( gtp1 - > tid ) ) ;
if ( ! pctx ) {
netdev_dbg ( gtp - > dev , " No PDP ctx to decap skb=%p \n " , skb ) ;
return 1 ;
}
return gtp_rx ( pctx , skb , hdrlen , gtp - > role ) ;
2016-05-09 01:55:48 +03:00
}
2019-07-02 18:22:25 +03:00
static void __gtp_encap_destroy ( struct sock * sk )
2016-05-09 01:55:48 +03:00
{
2017-03-09 19:42:57 +03:00
struct gtp_dev * gtp ;
2016-05-09 01:55:48 +03:00
2019-07-02 18:20:51 +03:00
lock_sock ( sk ) ;
gtp = sk - > sk_user_data ;
2017-03-09 19:42:57 +03:00
if ( gtp ) {
2019-07-02 18:22:25 +03:00
if ( gtp - > sk0 = = sk )
gtp - > sk0 = NULL ;
else
gtp - > sk1u = NULL ;
2017-03-09 19:42:57 +03:00
udp_sk ( sk ) - > encap_type = 0 ;
rcu_assign_sk_user_data ( sk , NULL ) ;
sock_put ( sk ) ;
}
2019-07-02 18:20:51 +03:00
release_sock ( sk ) ;
2016-05-09 01:55:48 +03:00
}
2019-07-02 18:22:25 +03:00
/* udp_tunnel encap_destroy callback: take RTNL, then tear down. */
static void gtp_encap_destroy(struct sock *sk)
{
	rtnl_lock();
	__gtp_encap_destroy(sk);
	rtnl_unlock();
}
2017-03-09 19:42:57 +03:00
/* NULL-tolerant wrapper around __gtp_encap_destroy(). */
static void gtp_encap_disable_sock(struct sock *sk)
{
	if (sk)
		__gtp_encap_destroy(sk);
}
static void gtp_encap_disable ( struct gtp_dev * gtp )
{
2022-03-04 19:40:42 +03:00
if ( gtp - > sk_created ) {
udp_tunnel_sock_release ( gtp - > sk0 - > sk_socket ) ;
udp_tunnel_sock_release ( gtp - > sk1u - > sk_socket ) ;
gtp - > sk_created = false ;
gtp - > sk0 = NULL ;
gtp - > sk1u = NULL ;
} else {
gtp_encap_disable_sock ( gtp - > sk0 ) ;
gtp_encap_disable_sock ( gtp - > sk1u ) ;
}
2016-05-09 01:55:48 +03:00
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
* Return codes : 0 : success , < 0 : error , > 0 : pass up to userspace UDP socket .
*/
static int gtp_encap_recv ( struct sock * sk , struct sk_buff * skb )
{
struct gtp_dev * gtp ;
2017-03-09 19:42:59 +03:00
int ret = 0 ;
2016-05-09 01:55:48 +03:00
gtp = rcu_dereference_sk_user_data ( sk ) ;
if ( ! gtp )
return 1 ;
2021-02-03 10:07:59 +03:00
netdev_dbg ( gtp - > dev , " encap_recv sk=%p \n " , sk ) ;
2016-05-09 01:55:48 +03:00
switch ( udp_sk ( sk ) - > encap_type ) {
case UDP_ENCAP_GTP0 :
netdev_dbg ( gtp - > dev , " received GTP0 packet \n " ) ;
2017-03-09 19:43:02 +03:00
ret = gtp0_udp_encap_recv ( gtp , skb ) ;
2016-05-09 01:55:48 +03:00
break ;
case UDP_ENCAP_GTP1U :
netdev_dbg ( gtp - > dev , " received GTP1U packet \n " ) ;
2017-03-09 19:43:02 +03:00
ret = gtp1u_udp_encap_recv ( gtp , skb ) ;
2016-05-09 01:55:48 +03:00
break ;
default :
ret = - 1 ; /* Shouldn't happen. */
}
switch ( ret ) {
case 1 :
netdev_dbg ( gtp - > dev , " pass up to the process \n " ) ;
2017-03-09 19:42:59 +03:00
break ;
2016-05-09 01:55:48 +03:00
case 0 :
break ;
case - 1 :
netdev_dbg ( gtp - > dev , " GTP packet has been dropped \n " ) ;
kfree_skb ( skb ) ;
2017-03-09 19:42:59 +03:00
ret = 0 ;
break ;
2016-05-09 01:55:48 +03:00
}
2017-03-09 19:42:59 +03:00
return ret ;
2016-05-09 01:55:48 +03:00
}
static int gtp_dev_init ( struct net_device * dev )
{
struct gtp_dev * gtp = netdev_priv ( dev ) ;
gtp - > dev = dev ;
2017-08-01 22:11:10 +03:00
dev - > tstats = netdev_alloc_pcpu_stats ( struct pcpu_sw_netstats ) ;
2016-05-09 01:55:48 +03:00
if ( ! dev - > tstats )
return - ENOMEM ;
return 0 ;
}
static void gtp_dev_uninit ( struct net_device * dev )
{
struct gtp_dev * gtp = netdev_priv ( dev ) ;
gtp_encap_disable ( gtp ) ;
free_percpu ( dev - > tstats ) ;
}
static inline void gtp0_push_header ( struct sk_buff * skb , struct pdp_ctx * pctx )
{
int payload_len = skb - > len ;
struct gtp0_header * gtp0 ;
networking: make skb_push & __skb_push return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions return void * and remove all the casts across
the tree, adding a (u8 *) cast only where the unsigned char pointer
was used directly, all done with the following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
@@
expression SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- fn(SKB, LEN)[0]
+ *(u8 *)fn(SKB, LEN)
Note that the last part there converts from push(...)[0] to the
more idiomatic *(u8 *)push(...).
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:23 +03:00
gtp0 = skb_push ( skb , sizeof ( * gtp0 ) ) ;
2016-05-09 01:55:48 +03:00
gtp0 - > flags = 0x1e ; /* v0, GTP-non-prime. */
gtp0 - > type = GTP_TPDU ;
gtp0 - > length = htons ( payload_len ) ;
gtp0 - > seq = htons ( ( atomic_inc_return ( & pctx - > tx_seq ) - 1 ) % 0xffff ) ;
gtp0 - > flow = htons ( pctx - > u . v0 . flow ) ;
gtp0 - > number = 0xff ;
gtp0 - > spare [ 0 ] = gtp0 - > spare [ 1 ] = gtp0 - > spare [ 2 ] = 0xff ;
gtp0 - > tid = cpu_to_be64 ( pctx - > u . v0 . tid ) ;
}
2021-02-03 10:07:59 +03:00
static inline void gtp1_push_header ( struct sk_buff * skb , struct pdp_ctx * pctx )
2016-05-09 01:55:48 +03:00
{
int payload_len = skb - > len ;
struct gtp1_header * gtp1 ;
networking: make skb_push & __skb_push return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions return void * and remove all the casts across
the tree, adding a (u8 *) cast only where the unsigned char pointer
was used directly, all done with the following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
@@
expression SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- fn(SKB, LEN)[0]
+ *(u8 *)fn(SKB, LEN)
Note that the last part there converts from push(...)[0] to the
more idiomatic *(u8 *)push(...).
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:23 +03:00
gtp1 = skb_push ( skb , sizeof ( * gtp1 ) ) ;
2016-05-09 01:55:48 +03:00
/* Bits 8 7 6 5 4 3 2 1
* + - - + - - + - - + - - + - - + - - + - - + - - +
2016-12-16 00:35:53 +03:00
* | version | PT | 0 | E | S | PN |
2016-05-09 01:55:48 +03:00
* + - - + - - + - - + - - + - - + - - + - - + - - +
* 0 0 1 1 1 0 0 0
*/
2016-12-16 00:35:53 +03:00
gtp1 - > flags = 0x30 ; /* v1, GTP-non-prime. */
2016-05-09 01:55:48 +03:00
gtp1 - > type = GTP_TPDU ;
gtp1 - > length = htons ( payload_len ) ;
2021-02-03 10:07:59 +03:00
gtp1 - > tid = htonl ( pctx - > u . v1 . o_tei ) ;
2016-05-09 01:55:48 +03:00
2021-06-01 17:16:25 +03:00
/* TODO: Support for extension header, sequence number and N-PDU.
2016-05-09 01:55:48 +03:00
* Update the length field if any of them is available .
*/
}
2021-02-03 10:07:59 +03:00
/* Transmit-side state collected by gtp_build_skb_ip4() and consumed
 * by gtp_dev_xmit() when handing the skb to the UDP tunnel layer.
 */
struct gtp_pktinfo {
	struct sock		*sk;		/* encap socket to send on */
	struct iphdr		*iph;		/* inner IPv4 header */
	struct flowi4		fl4;		/* resolved flow key */
	struct rtable		*rt;		/* route to the peer GSN */
	struct pdp_ctx		*pctx;		/* matched PDP context */
	struct net_device	*dev;		/* gtp netdevice */
	__be16			gtph_port;	/* GTP0_PORT or GTP1U_PORT */
};
2021-01-10 10:00:21 +03:00
2021-02-03 10:07:59 +03:00
static void gtp_push_header ( struct sk_buff * skb , struct gtp_pktinfo * pktinfo )
{
switch ( pktinfo - > pctx - > gtp_version ) {
case GTP_V0 :
pktinfo - > gtph_port = htons ( GTP0_PORT ) ;
gtp0_push_header ( skb , pktinfo - > pctx ) ;
break ;
case GTP_V1 :
pktinfo - > gtph_port = htons ( GTP1U_PORT ) ;
gtp1_push_header ( skb , pktinfo - > pctx ) ;
break ;
2016-05-09 01:55:48 +03:00
}
}
static inline void gtp_set_pktinfo_ipv4 ( struct gtp_pktinfo * pktinfo ,
2021-02-03 10:07:59 +03:00
struct sock * sk , struct iphdr * iph ,
struct pdp_ctx * pctx , struct rtable * rt ,
2016-05-09 01:55:48 +03:00
struct flowi4 * fl4 ,
struct net_device * dev )
{
pktinfo - > sk = sk ;
2021-02-03 10:07:59 +03:00
pktinfo - > iph = iph ;
pktinfo - > pctx = pctx ;
2016-05-09 01:55:48 +03:00
pktinfo - > rt = rt ;
pktinfo - > fl4 = * fl4 ;
pktinfo - > dev = dev ;
}
static int gtp_build_skb_ip4 ( struct sk_buff * skb , struct net_device * dev ,
struct gtp_pktinfo * pktinfo )
{
struct gtp_dev * gtp = netdev_priv ( dev ) ;
struct pdp_ctx * pctx ;
struct rtable * rt ;
struct flowi4 fl4 ;
2021-02-03 10:07:59 +03:00
struct iphdr * iph ;
__be16 df ;
2016-05-09 01:55:48 +03:00
int mtu ;
2021-02-03 10:07:59 +03:00
/* Read the IP destination address and resolve the PDP context.
* Prepend PDP header with TEI / TID from PDP ctx .
*/
iph = ip_hdr ( skb ) ;
if ( gtp - > role = = GTP_ROLE_SGSN )
pctx = ipv4_pdp_find ( gtp , iph - > saddr ) ;
else
pctx = ipv4_pdp_find ( gtp , iph - > daddr ) ;
2021-01-10 10:00:21 +03:00
2021-02-03 10:07:59 +03:00
if ( ! pctx ) {
netdev_dbg ( dev , " no PDP ctx found for %pI4, skip \n " ,
& iph - > daddr ) ;
return - ENOENT ;
2016-05-09 01:55:48 +03:00
}
2021-02-03 10:07:59 +03:00
netdev_dbg ( dev , " found PDP context %p \n " , pctx ) ;
2016-05-09 01:55:48 +03:00
2022-03-04 19:40:43 +03:00
rt = ip4_route_output_gtp ( & fl4 , pctx - > sk , pctx - > peer_addr_ip4 . s_addr ,
inet_sk ( pctx - > sk ) - > inet_saddr ) ;
2016-05-09 01:55:48 +03:00
if ( IS_ERR ( rt ) ) {
2021-02-03 10:07:59 +03:00
netdev_dbg ( dev , " no route to SSGN %pI4 \n " ,
& pctx - > peer_addr_ip4 . s_addr ) ;
2016-05-09 01:55:48 +03:00
dev - > stats . tx_carrier_errors + + ;
goto err ;
}
if ( rt - > dst . dev = = dev ) {
2021-02-03 10:07:59 +03:00
netdev_dbg ( dev , " circular route to SSGN %pI4 \n " ,
& pctx - > peer_addr_ip4 . s_addr ) ;
2016-05-09 01:55:48 +03:00
dev - > stats . collisions + + ;
goto err_rt ;
}
/* This is similar to tnl_update_pmtu(). */
2021-02-03 10:07:59 +03:00
df = iph - > frag_off ;
2016-05-09 01:55:48 +03:00
if ( df ) {
mtu = dst_mtu ( & rt - > dst ) - dev - > hard_header_len -
sizeof ( struct iphdr ) - sizeof ( struct udphdr ) ;
2021-02-03 10:07:59 +03:00
switch ( pctx - > gtp_version ) {
2016-05-09 01:55:48 +03:00
case GTP_V0 :
mtu - = sizeof ( struct gtp0_header ) ;
break ;
case GTP_V1 :
mtu - = sizeof ( struct gtp1_header ) ;
break ;
}
} else {
mtu = dst_mtu ( & rt - > dst ) ;
}
2021-10-06 06:57:39 +03:00
skb_dst_update_pmtu_no_confirm ( skb , mtu ) ;
2016-05-09 01:55:48 +03:00
2021-02-03 10:07:59 +03:00
if ( ! skb_is_gso ( skb ) & & ( iph - > frag_off & htons ( IP_DF ) ) & &
mtu < ntohs ( iph - > tot_len ) ) {
netdev_dbg ( dev , " packet too big, fragmentation needed \n " ) ;
2020-02-11 22:47:06 +03:00
icmp_ndo_send ( skb , ICMP_DEST_UNREACH , ICMP_FRAG_NEEDED ,
htonl ( mtu ) ) ;
2016-05-09 01:55:48 +03:00
goto err_rt ;
}
2021-02-03 10:07:59 +03:00
gtp_set_pktinfo_ipv4 ( pktinfo , pctx - > sk , iph , pctx , rt , & fl4 , dev ) ;
gtp_push_header ( skb , pktinfo ) ;
2016-05-09 01:55:48 +03:00
return 0 ;
err_rt :
ip_rt_put ( rt ) ;
err :
return - EBADMSG ;
}
static netdev_tx_t gtp_dev_xmit ( struct sk_buff * skb , struct net_device * dev )
{
2021-02-03 10:07:59 +03:00
unsigned int proto = ntohs ( skb - > protocol ) ;
2016-05-09 01:55:48 +03:00
struct gtp_pktinfo pktinfo ;
int err ;
/* Ensure there is sufficient headroom. */
if ( skb_cow_head ( skb , dev - > needed_headroom ) )
goto tx_err ;
skb_reset_inner_headers ( skb ) ;
/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
rcu_read_lock ( ) ;
2021-02-03 10:07:59 +03:00
switch ( proto ) {
case ETH_P_IP :
err = gtp_build_skb_ip4 ( skb , dev , & pktinfo ) ;
break ;
default :
err = - EOPNOTSUPP ;
break ;
}
2016-05-09 01:55:48 +03:00
rcu_read_unlock ( ) ;
if ( err < 0 )
goto tx_err ;
2021-02-03 10:07:59 +03:00
switch ( proto ) {
case ETH_P_IP :
netdev_dbg ( pktinfo . dev , " gtp -> IP src: %pI4 dst: %pI4 \n " ,
& pktinfo . iph - > saddr , & pktinfo . iph - > daddr ) ;
udp_tunnel_xmit_skb ( pktinfo . rt , pktinfo . sk , skb ,
pktinfo . fl4 . saddr , pktinfo . fl4 . daddr ,
pktinfo . iph - > tos ,
ip4_dst_hoplimit ( & pktinfo . rt - > dst ) ,
0 ,
pktinfo . gtph_port , pktinfo . gtph_port ,
2021-02-03 10:08:02 +03:00
! net_eq ( sock_net ( pktinfo . pctx - > sk ) ,
dev_net ( dev ) ) ,
false ) ;
2021-02-03 10:07:59 +03:00
break ;
}
2016-05-09 01:55:48 +03:00
return NETDEV_TX_OK ;
tx_err :
dev - > stats . tx_errors + + ;
dev_kfree_skb ( skb ) ;
return NETDEV_TX_OK ;
}
static const struct net_device_ops gtp_netdev_ops = {
. ndo_init = gtp_dev_init ,
. ndo_uninit = gtp_dev_uninit ,
. ndo_start_xmit = gtp_dev_xmit ,
2020-11-07 23:52:42 +03:00
. ndo_get_stats64 = dev_get_tstats64 ,
2016-05-09 01:55:48 +03:00
} ;
2021-02-03 10:08:04 +03:00
static const struct device_type gtp_type = {
. name = " gtp " ,
} ;
2016-05-09 01:55:48 +03:00
static void gtp_link_setup ( struct net_device * dev )
{
2021-02-03 10:08:00 +03:00
unsigned int max_gtp_header_len = sizeof ( struct iphdr ) +
sizeof ( struct udphdr ) +
sizeof ( struct gtp0_header ) ;
2016-05-09 01:55:48 +03:00
dev - > netdev_ops = & gtp_netdev_ops ;
net: Fix inconsistent teardown and release of private netdev state.
Network devices can allocate reasources and private memory using
netdev_ops->ndo_init(). However, the release of these resources
can occur in one of two different places.
Either netdev_ops->ndo_uninit() or netdev->destructor().
The decision of which operation frees the resources depends upon
whether it is necessary for all netdev refs to be released before it
is safe to perform the freeing.
netdev_ops->ndo_uninit() presumably can occur right after the
NETDEV_UNREGISTER notifier completes and the unicast and multicast
address lists are flushed.
netdev->destructor(), on the other hand, does not run until the
netdev references all go away.
Further complicating the situation is that netdev->destructor()
almost universally does also a free_netdev().
This creates a problem for the logic in register_netdevice().
Because all callers of register_netdevice() manage the freeing
of the netdev, and invoke free_netdev(dev) if register_netdevice()
fails.
If netdev_ops->ndo_init() succeeds, but something else fails inside
of register_netdevice(), it does call ndo_ops->ndo_uninit(). But
it is not able to invoke netdev->destructor().
This is because netdev->destructor() will do a free_netdev() and
then the caller of register_netdevice() will do the same.
However, this means that the resources that would normally be released
by netdev->destructor() will not be.
Over the years drivers have added local hacks to deal with this, by
invoking their destructor parts by hand when register_netdevice()
fails.
Many drivers do not try to deal with this, and instead we have leaks.
Let's close this hole by formalizing the distinction between what
private things need to be freed up by netdev->destructor() and whether
the driver needs unregister_netdevice() to perform the free_netdev().
netdev->priv_destructor() performs all actions to free up the private
resources that used to be freed by netdev->destructor(), except for
free_netdev().
netdev->needs_free_netdev is a boolean that indicates whether
free_netdev() should be done at the end of unregister_netdevice().
Now, register_netdevice() can sanely release all resources after
ndo_ops->ndo_init() succeeds, by invoking both ndo_ops->ndo_uninit()
and netdev->priv_destructor().
And at the end of unregister_netdevice(), we invoke
netdev->priv_destructor() and optionally call free_netdev().
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-05-08 19:52:56 +03:00
dev - > needs_free_netdev = true ;
2021-02-03 10:08:04 +03:00
SET_NETDEV_DEVTYPE ( dev , & gtp_type ) ;
2016-05-09 01:55:48 +03:00
dev - > hard_header_len = 0 ;
dev - > addr_len = 0 ;
2021-02-03 10:08:00 +03:00
dev - > mtu = ETH_DATA_LEN - max_gtp_header_len ;
2016-05-09 01:55:48 +03:00
/* Zero header length. */
dev - > type = ARPHRD_NONE ;
dev - > flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST ;
dev - > priv_flags | = IFF_NO_QUEUE ;
dev - > features | = NETIF_F_LLTX ;
netif_keep_dst ( dev ) ;
2021-02-03 10:08:00 +03:00
dev - > needed_headroom = LL_MAX_HEADER + max_gtp_header_len ;
2016-05-09 01:55:48 +03:00
}
static int gtp_hashtable_new ( struct gtp_dev * gtp , int hsize ) ;
2021-02-03 10:07:59 +03:00
static int gtp_encap_enable ( struct gtp_dev * gtp , struct nlattr * data [ ] ) ;
2016-05-09 01:55:48 +03:00
2019-12-11 11:23:34 +03:00
static void gtp_destructor ( struct net_device * dev )
{
struct gtp_dev * gtp = netdev_priv ( dev ) ;
kfree ( gtp - > addr_hash ) ;
kfree ( gtp - > tid_hash ) ;
}
2022-03-04 19:40:42 +03:00
static struct sock * gtp_create_sock ( int type , struct gtp_dev * gtp )
{
struct udp_tunnel_sock_cfg tuncfg = { } ;
struct udp_port_cfg udp_conf = {
. local_ip . s_addr = htonl ( INADDR_ANY ) ,
. family = AF_INET ,
} ;
struct net * net = gtp - > net ;
struct socket * sock ;
int err ;
if ( type = = UDP_ENCAP_GTP0 )
udp_conf . local_udp_port = htons ( GTP0_PORT ) ;
else if ( type = = UDP_ENCAP_GTP1U )
udp_conf . local_udp_port = htons ( GTP1U_PORT ) ;
else
return ERR_PTR ( - EINVAL ) ;
err = udp_sock_create ( net , & udp_conf , & sock ) ;
if ( err )
return ERR_PTR ( err ) ;
tuncfg . sk_user_data = gtp ;
tuncfg . encap_type = type ;
tuncfg . encap_rcv = gtp_encap_recv ;
tuncfg . encap_destroy = NULL ;
setup_udp_tunnel_sock ( net , sock , & tuncfg ) ;
return sock - > sk ;
}
static int gtp_create_sockets ( struct gtp_dev * gtp , struct nlattr * data [ ] )
{
struct sock * sk1u = NULL ;
struct sock * sk0 = NULL ;
sk0 = gtp_create_sock ( UDP_ENCAP_GTP0 , gtp ) ;
if ( IS_ERR ( sk0 ) )
return PTR_ERR ( sk0 ) ;
sk1u = gtp_create_sock ( UDP_ENCAP_GTP1U , gtp ) ;
if ( IS_ERR ( sk1u ) ) {
udp_tunnel_sock_release ( sk0 - > sk_socket ) ;
return PTR_ERR ( sk1u ) ;
}
gtp - > sk_created = true ;
gtp - > sk0 = sk0 ;
gtp - > sk1u = sk1u ;
return 0 ;
}
2016-05-09 01:55:48 +03:00
static int gtp_newlink ( struct net * src_net , struct net_device * dev ,
2017-06-26 00:55:59 +03:00
struct nlattr * tb [ ] , struct nlattr * data [ ] ,
struct netlink_ext_ack * extack )
2016-05-09 01:55:48 +03:00
{
2022-03-04 19:40:42 +03:00
unsigned int role = GTP_ROLE_GGSN ;
2016-05-09 01:55:48 +03:00
struct gtp_dev * gtp ;
struct gtp_net * gn ;
2017-03-09 19:42:57 +03:00
int hashsize , err ;
2016-05-09 01:55:48 +03:00
gtp = netdev_priv ( dev ) ;
2019-12-11 11:23:48 +03:00
if ( ! data [ IFLA_GTP_PDP_HASHSIZE ] ) {
2016-05-09 01:55:48 +03:00
hashsize = 1024 ;
2019-12-11 11:23:48 +03:00
} else {
2016-05-09 01:55:48 +03:00
hashsize = nla_get_u32 ( data [ IFLA_GTP_PDP_HASHSIZE ] ) ;
2019-12-11 11:23:48 +03:00
if ( ! hashsize )
hashsize = 1024 ;
}
2016-05-09 01:55:48 +03:00
2022-03-04 19:40:42 +03:00
if ( data [ IFLA_GTP_ROLE ] ) {
role = nla_get_u32 ( data [ IFLA_GTP_ROLE ] ) ;
if ( role > GTP_ROLE_SGSN )
return - EINVAL ;
}
gtp - > role = role ;
2022-03-04 19:40:43 +03:00
if ( ! data [ IFLA_GTP_RESTART_COUNT ] )
gtp - > restart_count = 0 ;
else
gtp - > restart_count = nla_get_u8 ( data [ IFLA_GTP_RESTART_COUNT ] ) ;
2022-03-04 19:40:42 +03:00
gtp - > net = src_net ;
2016-05-09 01:55:48 +03:00
err = gtp_hashtable_new ( gtp , hashsize ) ;
if ( err < 0 )
2020-10-27 14:48:46 +03:00
return err ;
2022-03-04 19:40:42 +03:00
if ( data [ IFLA_GTP_CREATE_SOCKETS ] )
err = gtp_create_sockets ( gtp , data ) ;
else
err = gtp_encap_enable ( gtp , data ) ;
2020-10-27 14:48:46 +03:00
if ( err < 0 )
goto out_hashtable ;
2016-05-09 01:55:48 +03:00
err = register_netdevice ( dev ) ;
if ( err < 0 ) {
netdev_dbg ( dev , " failed to register new netdev %d \n " , err ) ;
2020-10-27 14:48:46 +03:00
goto out_encap ;
2016-05-09 01:55:48 +03:00
}
gn = net_generic ( dev_net ( dev ) , gtp_net_id ) ;
list_add_rcu ( & gtp - > list , & gn - > gtp_dev_list ) ;
2019-12-11 11:23:34 +03:00
dev - > priv_destructor = gtp_destructor ;
2016-05-09 01:55:48 +03:00
2021-02-03 10:07:59 +03:00
netdev_dbg ( dev , " registered new GTP interface \n " ) ;
2016-05-09 01:55:48 +03:00
return 0 ;
2020-10-27 14:48:46 +03:00
out_encap :
gtp_encap_disable ( gtp ) ;
2016-05-09 01:55:48 +03:00
out_hashtable :
2019-12-11 11:23:34 +03:00
kfree ( gtp - > addr_hash ) ;
kfree ( gtp - > tid_hash ) ;
2016-05-09 01:55:48 +03:00
return err ;
}
static void gtp_dellink ( struct net_device * dev , struct list_head * head )
{
struct gtp_dev * gtp = netdev_priv ( dev ) ;
2019-12-11 11:23:34 +03:00
struct pdp_ctx * pctx ;
int i ;
for ( i = 0 ; i < gtp - > hash_size ; i + + )
hlist_for_each_entry_rcu ( pctx , & gtp - > tid_hash [ i ] , hlist_tid )
pdp_context_delete ( pctx ) ;
2016-05-09 01:55:48 +03:00
list_del_rcu ( & gtp - > list ) ;
unregister_netdevice_queue ( dev , head ) ;
}
static const struct nla_policy gtp_policy [ IFLA_GTP_MAX + 1 ] = {
[ IFLA_GTP_FD0 ] = { . type = NLA_U32 } ,
[ IFLA_GTP_FD1 ] = { . type = NLA_U32 } ,
[ IFLA_GTP_PDP_HASHSIZE ] = { . type = NLA_U32 } ,
2017-03-25 01:23:21 +03:00
[ IFLA_GTP_ROLE ] = { . type = NLA_U32 } ,
2022-03-04 19:40:42 +03:00
[ IFLA_GTP_CREATE_SOCKETS ] = { . type = NLA_U8 } ,
2022-03-04 19:40:43 +03:00
[ IFLA_GTP_RESTART_COUNT ] = { . type = NLA_U8 } ,
2016-05-09 01:55:48 +03:00
} ;
2017-06-26 00:56:01 +03:00
static int gtp_validate ( struct nlattr * tb [ ] , struct nlattr * data [ ] ,
struct netlink_ext_ack * extack )
2016-05-09 01:55:48 +03:00
{
if ( ! data )
return - EINVAL ;
return 0 ;
}
static size_t gtp_get_size ( const struct net_device * dev )
{
2021-02-03 10:08:01 +03:00
return nla_total_size ( sizeof ( __u32 ) ) + /* IFLA_GTP_PDP_HASHSIZE */
2022-03-04 19:40:43 +03:00
nla_total_size ( sizeof ( __u32 ) ) + /* IFLA_GTP_ROLE */
nla_total_size ( sizeof ( __u8 ) ) ; /* IFLA_GTP_RESTART_COUNT */
2016-05-09 01:55:48 +03:00
}
static int gtp_fill_info ( struct sk_buff * skb , const struct net_device * dev )
{
struct gtp_dev * gtp = netdev_priv ( dev ) ;
if ( nla_put_u32 ( skb , IFLA_GTP_PDP_HASHSIZE , gtp - > hash_size ) )
goto nla_put_failure ;
2021-02-03 10:08:01 +03:00
if ( nla_put_u32 ( skb , IFLA_GTP_ROLE , gtp - > role ) )
goto nla_put_failure ;
2022-03-04 19:40:43 +03:00
if ( nla_put_u8 ( skb , IFLA_GTP_RESTART_COUNT , gtp - > restart_count ) )
goto nla_put_failure ;
2016-05-09 01:55:48 +03:00
return 0 ;
nla_put_failure :
return - EMSGSIZE ;
}
/* rtnl_link_ops for "gtp" devices created via ip-link/netlink. */
static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};
static int gtp_hashtable_new ( struct gtp_dev * gtp , int hsize )
{
int i ;
treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a * b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 23:55:00 +03:00
gtp - > addr_hash = kmalloc_array ( hsize , sizeof ( struct hlist_head ) ,
2020-02-04 06:24:59 +03:00
GFP_KERNEL | __GFP_NOWARN ) ;
2016-05-09 01:55:48 +03:00
if ( gtp - > addr_hash = = NULL )
return - ENOMEM ;
treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a * b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 23:55:00 +03:00
gtp - > tid_hash = kmalloc_array ( hsize , sizeof ( struct hlist_head ) ,
2020-02-04 06:24:59 +03:00
GFP_KERNEL | __GFP_NOWARN ) ;
2016-05-09 01:55:48 +03:00
if ( gtp - > tid_hash = = NULL )
goto err1 ;
gtp - > hash_size = hsize ;
for ( i = 0 ; i < hsize ; i + + ) {
INIT_HLIST_HEAD ( & gtp - > addr_hash [ i ] ) ;
INIT_HLIST_HEAD ( & gtp - > tid_hash [ i ] ) ;
}
return 0 ;
err1 :
kfree ( gtp - > addr_hash ) ;
return - ENOMEM ;
}
2021-02-03 10:07:59 +03:00
static struct sock * gtp_encap_enable_socket ( int fd , int type ,
struct gtp_dev * gtp )
2016-05-09 01:55:48 +03:00
{
struct udp_tunnel_sock_cfg tuncfg = { NULL } ;
2021-02-03 10:07:59 +03:00
struct socket * sock ;
2017-03-09 19:42:57 +03:00
struct sock * sk ;
2021-02-03 10:07:59 +03:00
int err ;
pr_debug ( " enable gtp on %d, %d \n " , fd , type ) ;
sock = sockfd_lookup ( fd , & err ) ;
if ( ! sock ) {
pr_debug ( " gtp socket fd=%d not found \n " , fd ) ;
return NULL ;
}
2016-05-09 01:55:48 +03:00
2020-01-22 10:17:14 +03:00
sk = sock - > sk ;
if ( sk - > sk_protocol ! = IPPROTO_UDP | |
sk - > sk_type ! = SOCK_DGRAM | |
( sk - > sk_family ! = AF_INET & & sk - > sk_family ! = AF_INET6 ) ) {
2021-02-03 10:07:59 +03:00
pr_debug ( " socket fd=%d not UDP \n " , fd ) ;
sk = ERR_PTR ( - EINVAL ) ;
goto out_sock ;
2016-05-09 01:55:48 +03:00
}
2020-01-22 10:17:14 +03:00
lock_sock ( sk ) ;
if ( sk - > sk_user_data ) {
2021-02-03 10:07:59 +03:00
sk = ERR_PTR ( - EBUSY ) ;
goto out_rel_sock ;
2016-05-09 01:55:48 +03:00
}
2017-03-09 19:42:57 +03:00
sock_hold ( sk ) ;
2016-05-09 01:55:48 +03:00
tuncfg . sk_user_data = gtp ;
2017-03-09 19:42:57 +03:00
tuncfg . encap_type = type ;
2016-05-09 01:55:48 +03:00
tuncfg . encap_rcv = gtp_encap_recv ;
tuncfg . encap_destroy = gtp_encap_destroy ;
2017-03-09 19:42:57 +03:00
setup_udp_tunnel_sock ( sock_net ( sock - > sk ) , sock , & tuncfg ) ;
2021-01-10 10:00:21 +03:00
2021-02-03 10:07:59 +03:00
out_rel_sock :
release_sock ( sock - > sk ) ;
out_sock :
2017-03-09 19:42:57 +03:00
sockfd_put ( sock ) ;
2021-02-03 10:07:59 +03:00
return sk ;
2017-03-09 19:42:57 +03:00
}
2016-05-09 01:55:48 +03:00
2021-02-03 10:07:59 +03:00
static int gtp_encap_enable ( struct gtp_dev * gtp , struct nlattr * data [ ] )
2017-03-09 19:42:57 +03:00
{
struct sock * sk1u = NULL ;
struct sock * sk0 = NULL ;
2022-03-04 19:40:42 +03:00
if ( ! data [ IFLA_GTP_FD0 ] & & ! data [ IFLA_GTP_FD1 ] )
return - EINVAL ;
2017-03-09 19:42:57 +03:00
if ( data [ IFLA_GTP_FD0 ] ) {
u32 fd0 = nla_get_u32 ( data [ IFLA_GTP_FD0 ] ) ;
sk0 = gtp_encap_enable_socket ( fd0 , UDP_ENCAP_GTP0 , gtp ) ;
if ( IS_ERR ( sk0 ) )
return PTR_ERR ( sk0 ) ;
}
if ( data [ IFLA_GTP_FD1 ] ) {
u32 fd1 = nla_get_u32 ( data [ IFLA_GTP_FD1 ] ) ;
sk1u = gtp_encap_enable_socket ( fd1 , UDP_ENCAP_GTP1U , gtp ) ;
if ( IS_ERR ( sk1u ) ) {
2020-01-05 20:36:07 +03:00
gtp_encap_disable_sock ( sk0 ) ;
2017-03-09 19:42:57 +03:00
return PTR_ERR ( sk1u ) ;
}
}
gtp - > sk0 = sk0 ;
gtp - > sk1u = sk1u ;
return 0 ;
2016-05-09 01:55:48 +03:00
}
2017-03-09 19:42:58 +03:00
static struct gtp_dev * gtp_find_dev ( struct net * src_net , struct nlattr * nla [ ] )
2016-05-09 01:55:48 +03:00
{
2017-03-09 19:42:58 +03:00
struct gtp_dev * gtp = NULL ;
struct net_device * dev ;
struct net * net ;
2016-05-09 01:55:48 +03:00
2017-03-09 19:42:58 +03:00
/* Examine the link attributes and figure out which network namespace
* we are talking about .
*/
if ( nla [ GTPA_NET_NS_FD ] )
net = get_net_ns_by_fd ( nla_get_u32 ( nla [ GTPA_NET_NS_FD ] ) ) ;
else
net = get_net ( src_net ) ;
if ( IS_ERR ( net ) )
return NULL ;
/* Check if there's an existing gtpX device to configure */
dev = dev_get_by_index_rcu ( net , nla_get_u32 ( nla [ GTPA_LINK ] ) ) ;
2017-05-24 02:18:37 +03:00
if ( dev & & dev - > netdev_ops = = & gtp_netdev_ops )
2017-03-09 19:42:58 +03:00
gtp = netdev_priv ( dev ) ;
put_net ( net ) ;
return gtp ;
2016-05-09 01:55:48 +03:00
}
static void ipv4_pdp_fill ( struct pdp_ctx * pctx , struct genl_info * info )
{
pctx - > gtp_version = nla_get_u32 ( info - > attrs [ GTPA_VERSION ] ) ;
pctx - > af = AF_INET ;
2017-03-25 01:23:20 +03:00
pctx - > peer_addr_ip4 . s_addr =
nla_get_be32 ( info - > attrs [ GTPA_PEER_ADDRESS ] ) ;
2016-05-09 01:55:48 +03:00
pctx - > ms_addr_ip4 . s_addr =
nla_get_be32 ( info - > attrs [ GTPA_MS_ADDRESS ] ) ;
switch ( pctx - > gtp_version ) {
case GTP_V0 :
/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
* label needs to be the same for uplink and downlink packets ,
* so let ' s annotate this .
*/
pctx - > u . v0 . tid = nla_get_u64 ( info - > attrs [ GTPA_TID ] ) ;
pctx - > u . v0 . flow = nla_get_u16 ( info - > attrs [ GTPA_FLOW ] ) ;
break ;
case GTP_V1 :
pctx - > u . v1 . i_tei = nla_get_u32 ( info - > attrs [ GTPA_I_TEI ] ) ;
pctx - > u . v1 . o_tei = nla_get_u32 ( info - > attrs [ GTPA_O_TEI ] ) ;
break ;
default :
break ;
}
}
2020-08-27 15:19:23 +03:00
static struct pdp_ctx * gtp_pdp_add ( struct gtp_dev * gtp , struct sock * sk ,
struct genl_info * info )
2016-05-09 01:55:48 +03:00
{
2019-12-11 11:23:00 +03:00
struct pdp_ctx * pctx , * pctx_tid = NULL ;
2017-03-09 19:42:58 +03:00
struct net_device * dev = gtp - > dev ;
2016-05-09 01:55:48 +03:00
u32 hash_ms , hash_tid = 0 ;
2019-12-11 11:23:00 +03:00
unsigned int version ;
2016-05-09 01:55:48 +03:00
bool found = false ;
__be32 ms_addr ;
ms_addr = nla_get_be32 ( info - > attrs [ GTPA_MS_ADDRESS ] ) ;
hash_ms = ipv4_hashfn ( ms_addr ) % gtp - > hash_size ;
2019-12-11 11:23:00 +03:00
version = nla_get_u32 ( info - > attrs [ GTPA_VERSION ] ) ;
2016-05-09 01:55:48 +03:00
2019-12-11 11:23:00 +03:00
pctx = ipv4_pdp_find ( gtp , ms_addr ) ;
if ( pctx )
found = true ;
if ( version = = GTP_V0 )
pctx_tid = gtp0_pdp_find ( gtp ,
nla_get_u64 ( info - > attrs [ GTPA_TID ] ) ) ;
else if ( version = = GTP_V1 )
pctx_tid = gtp1_pdp_find ( gtp ,
nla_get_u32 ( info - > attrs [ GTPA_I_TEI ] ) ) ;
if ( pctx_tid )
found = true ;
2016-05-09 01:55:48 +03:00
if ( found ) {
if ( info - > nlhdr - > nlmsg_flags & NLM_F_EXCL )
2020-08-27 15:19:23 +03:00
return ERR_PTR ( - EEXIST ) ;
2016-05-09 01:55:48 +03:00
if ( info - > nlhdr - > nlmsg_flags & NLM_F_REPLACE )
2020-08-27 15:19:23 +03:00
return ERR_PTR ( - EOPNOTSUPP ) ;
2016-05-09 01:55:48 +03:00
2019-12-11 11:23:00 +03:00
if ( pctx & & pctx_tid )
2020-08-27 15:19:23 +03:00
return ERR_PTR ( - EEXIST ) ;
2019-12-11 11:23:00 +03:00
if ( ! pctx )
pctx = pctx_tid ;
2016-05-09 01:55:48 +03:00
ipv4_pdp_fill ( pctx , info ) ;
if ( pctx - > gtp_version = = GTP_V0 )
netdev_dbg ( dev , " GTPv0-U: update tunnel id = %llx (pdp %p) \n " ,
pctx - > u . v0 . tid , pctx ) ;
else if ( pctx - > gtp_version = = GTP_V1 )
netdev_dbg ( dev , " GTPv1-U: update tunnel id = %x/%x (pdp %p) \n " ,
pctx - > u . v1 . i_tei , pctx - > u . v1 . o_tei , pctx ) ;
2020-08-27 15:19:23 +03:00
return pctx ;
2016-05-09 01:55:48 +03:00
}
2019-07-02 18:23:13 +03:00
pctx = kmalloc ( sizeof ( * pctx ) , GFP_ATOMIC ) ;
2016-05-09 01:55:48 +03:00
if ( pctx = = NULL )
2020-08-27 15:19:23 +03:00
return ERR_PTR ( - ENOMEM ) ;
2016-05-09 01:55:48 +03:00
2017-03-09 19:43:02 +03:00
sock_hold ( sk ) ;
pctx - > sk = sk ;
2017-03-09 19:42:59 +03:00
pctx - > dev = gtp - > dev ;
2016-05-09 01:55:48 +03:00
ipv4_pdp_fill ( pctx , info ) ;
atomic_set ( & pctx - > tx_seq , 0 ) ;
switch ( pctx - > gtp_version ) {
case GTP_V0 :
/* TS 09.60: "The flow label identifies unambiguously a GTP
* flow . " . We use the tid for this instead, I cannot find a
* situation in which this doesn ' t unambiguosly identify the
* PDP context .
*/
hash_tid = gtp0_hashfn ( pctx - > u . v0 . tid ) % gtp - > hash_size ;
break ;
case GTP_V1 :
hash_tid = gtp1u_hashfn ( pctx - > u . v1 . i_tei ) % gtp - > hash_size ;
break ;
}
hlist_add_head_rcu ( & pctx - > hlist_addr , & gtp - > addr_hash [ hash_ms ] ) ;
hlist_add_head_rcu ( & pctx - > hlist_tid , & gtp - > tid_hash [ hash_tid ] ) ;
switch ( pctx - > gtp_version ) {
case GTP_V0 :
netdev_dbg ( dev , " GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p) \n " ,
2017-03-25 01:23:20 +03:00
pctx - > u . v0 . tid , & pctx - > peer_addr_ip4 ,
2016-05-09 01:55:48 +03:00
& pctx - > ms_addr_ip4 , pctx ) ;
break ;
case GTP_V1 :
netdev_dbg ( dev , " GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p) \n " ,
pctx - > u . v1 . i_tei , pctx - > u . v1 . o_tei ,
2017-03-25 01:23:20 +03:00
& pctx - > peer_addr_ip4 , & pctx - > ms_addr_ip4 , pctx ) ;
2016-05-09 01:55:48 +03:00
break ;
}
2020-08-27 15:19:23 +03:00
return pctx ;
2016-05-09 01:55:48 +03:00
}
2017-03-09 19:43:02 +03:00
static void pdp_context_free ( struct rcu_head * head )
{
struct pdp_ctx * pctx = container_of ( head , struct pdp_ctx , rcu_head ) ;
sock_put ( pctx - > sk ) ;
kfree ( pctx ) ;
}
2017-03-09 19:43:01 +03:00
/* Unlink a PDP context from both lookup tables and schedule its release.
 *
 * RCU readers may still hold references to @pctx, so the unlink uses the
 * _rcu hlist primitives and the actual free (pdp_context_free) is deferred
 * via call_rcu() until after a grace period.
 */
static void pdp_context_delete(struct pdp_ctx *pctx)
{
	/* Remove from both the tunnel-id hash and the MS-address hash. */
	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	call_rcu(&pctx->rcu_head, pdp_context_free);
}
static int gtp_tunnel_notify ( struct pdp_ctx * pctx , u8 cmd , gfp_t allocation ) ;
2020-08-27 15:19:23 +03:00
2016-05-09 01:55:48 +03:00
static int gtp_genl_new_pdp ( struct sk_buff * skb , struct genl_info * info )
{
2017-03-09 19:43:02 +03:00
unsigned int version ;
2020-08-27 15:19:23 +03:00
struct pdp_ctx * pctx ;
2017-03-09 19:42:58 +03:00
struct gtp_dev * gtp ;
2017-03-09 19:43:02 +03:00
struct sock * sk ;
2017-03-09 19:42:58 +03:00
int err ;
2016-05-09 01:55:48 +03:00
if ( ! info - > attrs [ GTPA_VERSION ] | |
! info - > attrs [ GTPA_LINK ] | |
2017-03-25 01:23:20 +03:00
! info - > attrs [ GTPA_PEER_ADDRESS ] | |
2016-05-09 01:55:48 +03:00
! info - > attrs [ GTPA_MS_ADDRESS ] )
return - EINVAL ;
2017-03-09 19:43:02 +03:00
version = nla_get_u32 ( info - > attrs [ GTPA_VERSION ] ) ;
switch ( version ) {
2016-05-09 01:55:48 +03:00
case GTP_V0 :
if ( ! info - > attrs [ GTPA_TID ] | |
! info - > attrs [ GTPA_FLOW ] )
return - EINVAL ;
break ;
case GTP_V1 :
if ( ! info - > attrs [ GTPA_I_TEI ] | |
! info - > attrs [ GTPA_O_TEI ] )
return - EINVAL ;
break ;
default :
return - EINVAL ;
}
2019-07-02 18:22:25 +03:00
rtnl_lock ( ) ;
2016-05-09 01:55:48 +03:00
2017-03-09 19:42:58 +03:00
gtp = gtp_find_dev ( sock_net ( skb - > sk ) , info - > attrs ) ;
if ( ! gtp ) {
err = - ENODEV ;
goto out_unlock ;
2016-05-12 18:16:31 +03:00
}
2016-05-09 01:55:48 +03:00
2017-03-09 19:43:02 +03:00
if ( version = = GTP_V0 )
sk = gtp - > sk0 ;
else if ( version = = GTP_V1 )
sk = gtp - > sk1u ;
else
sk = NULL ;
if ( ! sk ) {
err = - ENODEV ;
goto out_unlock ;
}
2020-08-27 15:19:23 +03:00
pctx = gtp_pdp_add ( gtp , sk , info ) ;
if ( IS_ERR ( pctx ) ) {
err = PTR_ERR ( pctx ) ;
} else {
2020-08-28 16:30:56 +03:00
gtp_tunnel_notify ( pctx , GTP_CMD_NEWPDP , GFP_KERNEL ) ;
2020-08-27 15:19:23 +03:00
err = 0 ;
}
2017-03-09 19:42:58 +03:00
out_unlock :
2019-07-02 18:22:25 +03:00
rtnl_unlock ( ) ;
2017-03-09 19:42:58 +03:00
return err ;
2016-05-09 01:55:48 +03:00
}
2017-03-09 19:43:00 +03:00
static struct pdp_ctx * gtp_find_pdp_by_link ( struct net * net ,
struct nlattr * nla [ ] )
2016-05-09 01:55:48 +03:00
{
struct gtp_dev * gtp ;
2017-03-09 19:43:00 +03:00
gtp = gtp_find_dev ( net , nla ) ;
if ( ! gtp )
return ERR_PTR ( - ENODEV ) ;
2016-05-09 01:55:48 +03:00
2017-03-09 19:43:00 +03:00
if ( nla [ GTPA_MS_ADDRESS ] ) {
__be32 ip = nla_get_be32 ( nla [ GTPA_MS_ADDRESS ] ) ;
2016-05-09 01:55:48 +03:00
2017-03-09 19:43:00 +03:00
return ipv4_pdp_find ( gtp , ip ) ;
} else if ( nla [ GTPA_VERSION ] ) {
u32 gtp_version = nla_get_u32 ( nla [ GTPA_VERSION ] ) ;
if ( gtp_version = = GTP_V0 & & nla [ GTPA_TID ] )
return gtp0_pdp_find ( gtp , nla_get_u64 ( nla [ GTPA_TID ] ) ) ;
else if ( gtp_version = = GTP_V1 & & nla [ GTPA_I_TEI ] )
return gtp1_pdp_find ( gtp , nla_get_u32 ( nla [ GTPA_I_TEI ] ) ) ;
2016-05-12 18:16:31 +03:00
}
2016-05-09 01:55:48 +03:00
2017-03-09 19:43:00 +03:00
return ERR_PTR ( - EINVAL ) ;
}
2016-05-09 01:55:48 +03:00
2017-03-09 19:43:00 +03:00
static struct pdp_ctx * gtp_find_pdp ( struct net * net , struct nlattr * nla [ ] )
{
struct pdp_ctx * pctx ;
2016-05-09 01:55:48 +03:00
2017-03-09 19:43:00 +03:00
if ( nla [ GTPA_LINK ] )
pctx = gtp_find_pdp_by_link ( net , nla ) ;
else
pctx = ERR_PTR ( - EINVAL ) ;
if ( ! pctx )
pctx = ERR_PTR ( - ENOENT ) ;
return pctx ;
}
/* GTP_CMD_DELPDP handler: look up and delete a PDP context.
 *
 * The lookup and deletion run entirely under rcu_read_lock(); the
 * context memory is reclaimed after a grace period by
 * pdp_context_delete()/pdp_context_free(). The notification must use
 * GFP_ATOMIC because we are inside an RCU read-side critical section.
 */
static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx;
	int err = 0;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto out_unlock;
	}

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	/* Best effort: a failed notification does not abort the delete. */
	gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
	pdp_context_delete(pctx);

out_unlock:
	rcu_read_unlock();
	return err;
}
/* Fill one generic netlink message describing @pctx.
 *
 * Emits the common attributes (version, link, peer and MS address)
 * followed by the version-specific tunnel identifiers (TID+flow for
 * GTPv0, I/O TEI for GTPv1). Returns 0 on success or -EMSGSIZE when
 * the skb has no room left; on failure the partially-built message is
 * cancelled so the skb stays consistent.
 */
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
		goto nla_put_failure;

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}
	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}
/* Broadcast a GTP_CMD_NEWPDP/GTP_CMD_DELPDP notification for @pctx to the
 * family's multicast group in the device's net namespace.
 *
 * @allocation: gfp flags suitable for the caller's context (GFP_KERNEL
 * from gtp_genl_new_pdp() under rtnl, GFP_ATOMIC from gtp_genl_del_pdp()
 * under rcu_read_lock()).
 *
 * Returns 0 on success or a negative errno; callers treat this as best
 * effort.
 */
static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
{
	struct sk_buff *msg;
	int ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	/* Use the caller-supplied flags for the multicast clone as well
	 * instead of forcing GFP_ATOMIC: process-context callers may sleep.
	 */
	ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
				      0, GTP_GENL_MCGRP, allocation);
	return ret;
}
/* GTP_CMD_GETPDP (doit) handler: look up one PDP context and unicast a
 * message describing it back to the requester.
 *
 * The lookup runs under rcu_read_lock(); the reply skb is filled while
 * still inside the critical section so @pctx cannot be freed underneath
 * us, then the lock is dropped before genlmsg_unicast().
 */
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct sk_buff *skb2;
	int err;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto err_unlock;
	}

	/* GFP_ATOMIC: still inside the RCU read-side critical section. */
	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
				 0, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}
/* GTP_CMD_GETPDP (dumpit) handler: dump all PDP contexts of all GTP
 * devices in the namespace, resumable across multiple netlink reads.
 *
 * Resume state carried in cb->args:
 *   args[0] = hash bucket to resume from
 *   args[1] = number of entries to skip within that bucket
 *   args[2] = pointer to the gtp_dev being dumped (cast to unsigned long)
 *   args[4] = dump-complete flag
 *
 * The device list and hash chains are walked under rcu_read_lock(); when
 * the skb fills up, the current position is saved and the partial skb
 * length returned so netlink calls back in for the rest.
 */
static int gtp_genl_dump_pdp(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	int i, j, bucket = cb->args[0], skip = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct pdp_ctx *pctx;
	struct gtp_net *gn;

	gn = net_generic(net, gtp_net_id);

	if (cb->args[4])
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		/* Fast-forward to the device we stopped at last time. */
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = bucket; i < gtp->hash_size; i++) {
			j = 0;
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
						 hlist_tid) {
				if (j >= skip &&
				    gtp_genl_fill_info(skb,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NLM_F_MULTI,
					    cb->nlh->nlmsg_type, pctx)) {
					/* skb full: remember where to resume. */
					cb->args[0] = i;
					cb->args[1] = j;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
				j++;
			}
			skip = 0;
		}
		bucket = 0;
	}
	cb->args[4] = 1;
out:
	rcu_read_unlock();
	return skb->len;
}
/* GTP_CMD_ECHOREQ handler: build and transmit a GTP Echo Request to the
 * peer given by GTPA_PEER_ADDRESS, sourced from GTPA_MS_ADDRESS.
 *
 * Only supported when the device created its own UDP sockets
 * (gtp->sk_created); userspace-provided sockets cannot be used here.
 * Returns 0 on success, -EINVAL on malformed request, -EOPNOTSUPP for
 * userspace sockets, -ENETDOWN if the device is down, -ENOMEM on
 * allocation failure, and -ENODEV for unknown device/version/no route.
 */
static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *skb_to_send;
	__be32 src_ip, dst_ip;
	unsigned int version;
	struct gtp_dev *gtp;
	struct flowi4 fl4;
	struct rtable *rt;
	struct sock *sk;
	__be16 port;
	int len;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_PEER_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);
	dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp)
		return -ENODEV;

	if (!gtp->sk_created)
		return -EOPNOTSUPP;
	if (!(gtp->dev->flags & IFF_UP))
		return -ENETDOWN;

	if (version == GTP_V0) {
		struct gtp0_header *gtp0_h;

		/* Reserve room for link layer, IP, UDP and GTPv0 headers. */
		len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
			sizeof(struct iphdr) + sizeof(struct udphdr);

		skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
		if (!skb_to_send)
			return -ENOMEM;

		sk = gtp->sk0;
		port = htons(GTP0_PORT);

		gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
		memset(gtp0_h, 0, sizeof(struct gtp0_header));
		gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
	} else if (version == GTP_V1) {
		struct gtp1_header_long *gtp1u_h;

		/* GTPv1-U echo uses the long header (sequence number). */
		len = LL_RESERVED_SPACE(gtp->dev) +
			sizeof(struct gtp1_header_long) +
			sizeof(struct iphdr) + sizeof(struct udphdr);

		skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
		if (!skb_to_send)
			return -ENOMEM;

		sk = gtp->sk1u;
		port = htons(GTP1U_PORT);

		gtp1u_h = skb_push(skb_to_send,
				   sizeof(struct gtp1_header_long));
		memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
		gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
	} else {
		return -ENODEV;
	}

	rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
	if (IS_ERR(rt)) {
		netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
			   &dst_ip);
		kfree_skb(skb_to_send);
		return -ENODEV;
	}

	udp_tunnel_xmit_skb(rt, sk, skb_to_send,
			    fl4.saddr, fl4.daddr,
			    fl4.flowi4_tos,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    port, port,
			    !net_eq(sock_net(sk),
				    dev_net(gtp->dev)),
			    false);
	return 0;
}
/* Netlink attribute policy for the "gtp" generic netlink family.
 * Addresses are carried as 32-bit values (IPv4 only at this point).
 */
static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_PEER_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
};
/* Generic netlink operations; all require CAP_NET_ADMIN
 * (GENL_ADMIN_PERM). Strict validation is skipped for compatibility
 * with pre-strict-validation userspace.
 */
static const struct genl_small_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_new_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_del_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_ECHOREQ,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_send_echo_req,
		.flags = GENL_ADMIN_PERM,
	},
};
/* The "gtp" generic netlink family definition. netnsok allows per-netns
 * use; resv_start_op reserves command numbers above the last one we
 * implement (GTP_CMD_ECHOREQ).
 *
 * NOTE: the extracted source had an unrelated commit message embedded in
 * the middle of this initializer, which cannot compile; the initializer
 * is reconstructed here with only the actual fields.
 */
static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.policy		= gtp_genl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= gtp_genl_ops,
	.n_small_ops	= ARRAY_SIZE(gtp_genl_ops),
	.resv_start_op	= GTP_CMD_ECHOREQ + 1,
	.mcgrps		= gtp_genl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(gtp_genl_mcgrps),
};
/* Per-netns init: set up the (initially empty) list of GTP devices. */
static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}
/* Per-netns exit: tear down every GTP device registered in this netns.
 * The devices are collected on a local list under rtnl and unregistered
 * in one batch to amortize the synchronization cost.
 */
static void __net_exit gtp_net_exit(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
		gtp_dellink(gtp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
/* Per-network-namespace state hooks; .size allocates one struct gtp_net
 * per netns, addressed via gtp_net_id.
 */
static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit	= gtp_net_exit,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};
/* Module init: seed the hash initval, then register the rtnl link type,
 * the generic netlink family and the pernet hooks, unwinding in reverse
 * order on failure.
 */
static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto error_out;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto unreg_genl_family;

	/* %zu: sizeof() yields a size_t (unsigned). */
	pr_info("GTP module loaded (pdp ctx size %zu bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_genl_family:
	genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
error_out:
	/* Fixed garbled message ("error loading GTP module loaded"). */
	pr_err("error loading GTP module\n");
	return err;
}
late_initcall(gtp_init);
/* Module exit: unregister in the reverse order of gtp_init().
 * unregister_pernet_subsys() runs last and invokes gtp_net_exit() for
 * every netns, tearing down any remaining devices.
 */
static void __exit gtp_fini(void)
{
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);
	unregister_pernet_subsys(&gtp_net_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);
MODULE_LICENSE ( " GPL " ) ;
MODULE_AUTHOR ( " Harald Welte <hwelte@sysmocom.de> " ) ;
MODULE_DESCRIPTION ( " Interface driver for GTP encapsulated traffic " ) ;
MODULE_ALIAS_RTNL_LINK ( " gtp " ) ;
2017-01-27 12:40:56 +03:00
MODULE_ALIAS_GENL_FAMILY ( " gtp " ) ;