91d0b78c51
Users who want to share a single public IP address for outgoing connections between several hosts traditionally reach for SNAT. However, SNAT requires state keeping on the node(s) performing the NAT. A stateless alternative exists, where a single IP address used for egress can be shared between several hosts by partitioning the available ephemeral port range. In such a setup: 1. Each host gets assigned a disjoint range of ephemeral ports. 2. Applications open connections from the host-assigned port range. 3. Return traffic gets routed to the host based on both, the destination IP and the destination port. An application which wants to open an outgoing connection (connect) from a given port range today can choose between two solutions: 1. Manually pick the source port by bind()'ing to it before connect()'ing the socket. This approach has a couple of downsides: a) Search for a free port has to be implemented in the user-space. If the chosen 4-tuple happens to be busy, the application needs to retry from a different local port number. Detecting if 4-tuple is busy can be either easy (TCP) or hard (UDP). In TCP case, the application simply has to check if connect() returned an error (EADDRNOTAVAIL). That is assuming that the local port sharing was enabled (REUSEADDR) by all the sockets. # Assume desired local port range is 60_000-60_511 s = socket(AF_INET, SOCK_STREAM) s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) s.bind(("192.0.2.1", 60_000)) s.connect(("1.1.1.1", 53)) # Fails only if 192.0.2.1:60000 -> 1.1.1.1:53 is busy # Application must retry with another local port In case of UDP, the network stack allows binding more than one socket to the same 4-tuple, when local port sharing is enabled (REUSEADDR). Hence detecting the conflict is much harder and involves querying sock_diag and toggling the REUSEADDR flag [1]. 
b) For TCP, bind()-ing to a port within the ephemeral port range means that no connecting sockets, that is those which leave it to the network stack to find a free local port at connect() time, can use this port. IOW, the bind hash bucket tb->fastreuse will be 0 or 1, and the port will be skipped during the free port search at connect() time. 2. Isolate the app in a dedicated netns and use the per-netns ip_local_port_range sysctl to adjust the ephemeral port range bounds. The per-netns setting affects all sockets, so this approach can be used only if: - there is just one egress IP address, or - the desired egress port range is the same for all egress IP addresses used by the application. For TCP, this approach avoids the downsides of (1). Free port search and 4-tuple conflict detection is done by the network stack: system("sysctl -w net.ipv4.ip_local_port_range='60000 60511'") s = socket(AF_INET, SOCK_STREAM) s.setsockopt(SOL_IP, IP_BIND_ADDRESS_NO_PORT, 1) s.bind(("192.0.2.1", 0)) s.connect(("1.1.1.1", 53)) # Fails if all 4-tuples 192.0.2.1:60000-60511 -> 1.1.1.1:53 are busy For UDP this approach has limited applicability. Setting the IP_BIND_ADDRESS_NO_PORT socket option does not result in local source port being shared with other connected UDP sockets. Hence relying on the network stack to find a free source port, limits the number of outgoing UDP flows from a single IP address down to the number of available ephemeral ports. To put it another way, partitioning the ephemeral port range between hosts using the existing Linux networking API is cumbersome. To address this use case, add a new socket option at the SOL_IP level, named IP_LOCAL_PORT_RANGE. The new option can be used to clamp down the ephemeral port range for each socket individually. The option can be used only to narrow down the per-netns local port range. If the per-socket range lies outside of the per-netns range, the latter takes precedence.
UAPI-wise, the low and high range bounds are passed to the kernel as a pair of u16 values in host byte order packed into a u32. This avoids pointer passing. PORT_LO = 40_000 PORT_HI = 40_511 s = socket(AF_INET, SOCK_STREAM) v = struct.pack("I", PORT_HI << 16 | PORT_LO) s.setsockopt(SOL_IP, IP_LOCAL_PORT_RANGE, v) s.bind(("127.0.0.1", 0)) s.getsockname() # Local address between ("127.0.0.1", 40_000) and ("127.0.0.1", 40_511), # if there is a free port. EADDRINUSE otherwise. [1] https://github.com/cloudflare/cloudflare-blog/blob/232b432c1d57/2022-02-connectx/connectx.py#L116 Reviewed-by: Marek Majkowski <marek@cloudflare.com> Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com> Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com> Reviewed-by: Eric Dumazet <edumazet@google.com> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
410 lines
10 KiB
C
410 lines
10 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* INET An implementation of the TCP/IP protocol suite for the LINUX
|
|
* operating system. INET is implemented using the BSD Socket
|
|
* interface as the means of communication with the user level.
|
|
*
|
|
* Definitions for inet_sock
|
|
*
|
|
* Authors: Many, reorganised here by
|
|
* Arnaldo Carvalho de Melo <acme@mandriva.com>
|
|
*/
|
|
#ifndef _INET_SOCK_H
|
|
#define _INET_SOCK_H
|
|
|
|
#include <linux/bitops.h>
|
|
#include <linux/string.h>
|
|
#include <linux/types.h>
|
|
#include <linux/jhash.h>
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <net/flow.h>
|
|
#include <net/sock.h>
|
|
#include <net/request_sock.h>
|
|
#include <net/netns/hash.h>
|
|
#include <net/tcp_states.h>
|
|
#include <net/l3mdev.h>
|
|
|
|
/** struct ip_options - IP Options
 *
 * @faddr - Saved first hop address
 * @nexthop - Saved nexthop address in LSRR and SSRR
 * @is_strictroute - Strict source route
 * @srr_is_hit - Packet destination addr was our one
 * @is_changed - IP checksum no longer valid (options were rewritten)
 * @rr_needaddr - Need to record addr of outgoing dev
 * @ts_needtime - Need to record timestamp
 * @ts_needaddr - Need to record addr of outgoing dev
 */
struct ip_options {
	__be32		faddr;
	__be32		nexthop;
	unsigned char	optlen;		/* total length of options in __data */
	unsigned char	srr;		/* offset of SRR option — assumed offset into the IP header; TODO confirm vs ip_options_compile() */
	unsigned char	rr;		/* offset of Record Route option (same convention as @srr) */
	unsigned char	ts;		/* offset of Timestamp option (same convention as @srr) */
	unsigned char	is_strictroute:1,
			srr_is_hit:1,
			is_changed:1,
			rr_needaddr:1,
			ts_needtime:1,
			ts_needaddr:1;
	unsigned char	router_alert;
	unsigned char	cipso;
	unsigned char	__pad2;
	/* Flexible array member: raw option bytes follow the struct. */
	unsigned char	__data[];
};
|
|
|
|
/* RCU-managed wrapper so IP options can be replaced without blocking readers. */
struct ip_options_rcu {
	struct rcu_head rcu;
	struct ip_options opt;
};
|
|
|
|
/* On-stack sized variant: 40 bytes is the maximum IPv4 options length. */
struct ip_options_data {
	struct ip_options_rcu	opt;
	char			data[40];
};
|
|
|
|
/* INET-layer view of a connection request (mini-socket).  The ir_* /
 * ireq_* macros alias fields of the embedded request_sock's sock_common
 * so request and full sockets can be demultiplexed uniformly.
 */
struct inet_request_sock {
	struct request_sock	req;
#define ir_loc_addr		req.__req_common.skc_rcv_saddr
#define ir_rmt_addr		req.__req_common.skc_daddr
#define ir_num			req.__req_common.skc_num
#define ir_rmt_port		req.__req_common.skc_dport
#define ir_v6_rmt_addr		req.__req_common.skc_v6_daddr
#define ir_v6_loc_addr		req.__req_common.skc_v6_rcv_saddr
#define ir_iif			req.__req_common.skc_bound_dev_if
#define ir_cookie		req.__req_common.skc_cookie
#define ireq_net		req.__req_common.skc_net
#define ireq_state		req.__req_common.skc_state
#define ireq_family		req.__req_common.skc_family

	/* Negotiated options from the initial handshake, packed into 16 bits. */
	u16			snd_wscale : 4,
				rcv_wscale : 4,
				tstamp_ok  : 1,
				sack_ok	   : 1,
				wscale_ok  : 1,
				ecn_ok	   : 1,
				acked	   : 1,
				no_srccheck: 1,
				smc_ok	   : 1;
	u32                     ir_mark;
	union {
		struct ip_options_rcu __rcu	*ireq_opt;
#if IS_ENABLED(CONFIG_IPV6)
		struct {
			struct ipv6_txoptions	*ipv6_opt;
			struct sk_buff		*pktopts;
		};
#endif
	};
};
|
|
|
|
/* Downcast a request_sock to the inet_request_sock that embeds it
 * (struct request_sock is the first member, so the cast is safe).
 */
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
{
	struct inet_request_sock *ireq = (struct inet_request_sock *)sk;

	return ireq;
}
|
|
|
|
static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
|
|
{
|
|
if (!sk->sk_mark &&
|
|
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
|
|
return skb->mark;
|
|
|
|
return sk->sk_mark;
|
|
}
|
|
|
|
/* Device index to bind the request to: the listener's bound device, or —
 * when unbound and tcp_l3mdev_accept is set — the L3 master (VRF) of the
 * device the SYN arrived on, so replies leave through the same VRF.
 */
static inline int inet_request_bound_dev_if(const struct sock *sk,
					    struct sk_buff *skb)
{
	int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
#ifdef CONFIG_NET_L3_MASTER_DEV
	struct net *net = sock_net(sk);

	if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
		return l3mdev_master_ifindex_by_index(net, skb->skb_iif);
#endif

	return bound_dev_if;
}
|
|
|
|
/* L3 master (VRF) ifindex the socket is scoped to, or 0 for no scoping.
 * Note the inverted check: when tcp_l3mdev_accept is ENABLED, sockets are
 * shared across VRFs, so no l3mdev scoping applies and 0 is returned.
 */
static inline int inet_sk_bound_l3mdev(const struct sock *sk)
{
#ifdef CONFIG_NET_L3_MASTER_DEV
	struct net *net = sock_net(sk);

	if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))
		return l3mdev_master_ifindex_by_index(net,
						      sk->sk_bound_dev_if);
#endif

	return 0;
}
|
|
|
|
/* Decide whether a packet's incoming device matches a socket's binding.
 * @l3mdev_accept: netns accepts packets from L3 slave devices on unbound sockets
 * @bound_dev_if:  socket's bound ifindex (0 = unbound)
 * @dif:           ifindex the packet arrived on
 * @sdif:          L3 slave ifindex (0 when not via an L3 master device)
 */
static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
				     int dif, int sdif)
{
	/* Unbound socket: accept, unless the packet came through an L3
	 * slave device and the netns does not accept l3mdev traffic.
	 */
	if (bound_dev_if == 0)
		return l3mdev_accept || sdif == 0;

	/* Bound socket: the bound device must be either the receiving
	 * device itself or the L3 slave it arrived through.
	 */
	return dif == bound_dev_if || sdif == bound_dev_if;
}
|
|
|
|
/* Wrapper around inet_bound_dev_eq() that derives l3mdev_accept from the
 * per-netns tcp_l3mdev_accept sysctl; without L3 master device support
 * every sdif is acceptable.
 */
static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
					int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept),
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}
|
|
|
|
/* Per-socket state for building IP fragments while the socket is corked. */
struct inet_cork {
	unsigned int		flags;		/* IPCORK_* flags */
	__be32			addr;
	struct ip_options	*opt;
	unsigned int		fragsize;
	int			length; /* Total length of all frames */
	struct dst_entry	*dst;
	u8			tx_flags;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
	u64			transmit_time;
	u32			mark;
};
|
|
|
|
/* Cork state plus the flow key it was built for. */
struct inet_cork_full {
	struct inet_cork	base;
	struct flowi		fl;
};
|
|
|
|
struct ip_mc_socklist;
|
|
struct ipv6_pinfo;
|
|
struct rtable;
|
|
|
|
/** struct inet_sock - representation of INET sockets
 *
 * @sk - ancestor class
 * @pinet6 - pointer to IPv6 control block
 * @inet_daddr - Foreign IPv4 addr
 * @inet_rcv_saddr - Bound local IPv4 addr
 * @inet_dport - Destination port
 * @inet_num - Local port
 * @inet_saddr - Sending source
 * @uc_ttl - Unicast TTL
 * @inet_sport - Source port
 * @inet_id - ID counter for DF pkts
 * @tos - TOS
 * @mc_ttl - Multicasting TTL
 * @is_icsk - is this an inet_connection_sock?
 * @uc_index - Unicast outgoing device index
 * @mc_index - Multicast device index
 * @mc_list - Group array
 * @cork - info to build ip hdr on each ip frag while socket is corked
 * @local_port_range - per-socket ephemeral port range (IP_LOCAL_PORT_RANGE)
 */
struct inet_sock {
	/* sk and pinet6 has to be the first two members of inet_sock */
	struct sock		sk;
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6_pinfo	*pinet6;
#endif
	/* Socket demultiplex comparisons on incoming packets. */
#define inet_daddr		sk.__sk_common.skc_daddr
#define inet_rcv_saddr		sk.__sk_common.skc_rcv_saddr
#define inet_dport		sk.__sk_common.skc_dport
#define inet_num		sk.__sk_common.skc_num

	__be32			inet_saddr;
	__s16			uc_ttl;
	__u16			cmsg_flags;	/* IP_CMSG_* bits below */
	struct ip_options_rcu __rcu	*inet_opt;
	__be16			inet_sport;
	__u16			inet_id;

	__u8			tos;
	__u8			min_ttl;
	__u8			mc_ttl;
	__u8			pmtudisc;
	__u8			recverr:1,
				is_icsk:1,
				freebind:1,
				hdrincl:1,
				mc_loop:1,
				transparent:1,
				mc_all:1,
				nodefrag:1;
	__u8			bind_address_no_port:1,
				recverr_rfc4884:1,
				defer_connect:1; /* Indicates that fastopen_connect is set
						  * and cookie exists so we defer connect
						  * until first data frame is written
						  */
	__u8			rcv_tos;
	__u8			convert_csum;
	int			uc_index;
	int			mc_index;
	__be32			mc_addr;
	struct ip_mc_socklist __rcu	*mc_list;
	struct inet_cork_full	cork;
	/* Bounds set via the IP_LOCAL_PORT_RANGE socket option; can only
	 * narrow the per-netns ip_local_port_range. lo == hi == 0 means
	 * the per-socket range is unset and the netns range applies.
	 */
	struct {
		__u16 lo;
		__u16 hi;
	}			local_port_range;
};
|
|
|
|
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
|
|
#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
|
|
|
|
/* cmsg flags for inet */
|
|
#define IP_CMSG_PKTINFO BIT(0)
|
|
#define IP_CMSG_TTL BIT(1)
|
|
#define IP_CMSG_TOS BIT(2)
|
|
#define IP_CMSG_RECVOPTS BIT(3)
|
|
#define IP_CMSG_RETOPTS BIT(4)
|
|
#define IP_CMSG_PASSSEC BIT(5)
|
|
#define IP_CMSG_ORIGDSTADDR BIT(6)
|
|
#define IP_CMSG_CHECKSUM BIT(7)
|
|
#define IP_CMSG_RECVFRAGSIZE BIT(8)
|
|
|
|
static inline bool sk_is_inet(struct sock *sk)
|
|
{
|
|
return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
|
|
}
|
|
|
|
/**
 * sk_to_full_sk - Access to a full socket
 * @sk: pointer to a socket
 *
 * SYNACK messages might be attached to request sockets.
 * Some places want to reach the listener in this case.
 */
static inline struct sock *sk_to_full_sk(struct sock *sk)
{
#ifdef CONFIG_INET
	/* Request socks are mini-sockets; follow them to their listener. */
	if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
		sk = inet_reqsk(sk)->rsk_listener;
#endif
	return sk;
}
|
|
|
|
/* sk_to_full_sk() variant with a const argument */
static inline const struct sock *sk_const_to_full_sk(const struct sock *sk)
{
#ifdef CONFIG_INET
	/* Same listener hop as sk_to_full_sk(), preserving const-ness. */
	if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
		sk = ((const struct request_sock *)sk)->rsk_listener;
#endif
	return sk;
}
|
|
|
|
static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
|
|
{
|
|
return sk_to_full_sk(skb->sk);
|
|
}
|
|
|
|
/* Downcast a sock to the inet_sock that embeds it (struct sock is the
 * first member of struct inet_sock, so the cast is safe).
 */
static inline struct inet_sock *inet_sk(const struct sock *sk)
{
	struct inet_sock *inet = (struct inet_sock *)sk;

	return inet;
}
|
|
|
|
/* Copy the protocol-private fields of a descendant socket: everything
 * laid out AFTER the embedded struct inet_sock (hence the "+ 1" pointer
 * arithmetic), up to the protocol's full object size minus the ancestor
 * portion given by @ancestor_size.
 */
static inline void __inet_sk_copy_descendant(struct sock *sk_to,
					     const struct sock *sk_from,
					     const int ancestor_size)
{
	memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
	       sk_from->sk_prot->obj_size - ancestor_size);
}
|
|
|
|
int inet_sk_rebuild_header(struct sock *sk);
|
|
|
|
/**
 * inet_sk_state_load - read sk->sk_state for lockless contexts
 * @sk: socket pointer
 *
 * Paired with inet_sk_state_store(). Used in places we don't hold socket lock:
 * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
 *
 * Return: current value of sk->sk_state.
 */
static inline int inet_sk_state_load(const struct sock *sk)
{
	/* state change might impact lockless readers. */
	return smp_load_acquire(&sk->sk_state);
}
|
|
|
|
/**
|
|
* inet_sk_state_store - update sk->sk_state
|
|
* @sk: socket pointer
|
|
* @newstate: new state
|
|
*
|
|
* Paired with inet_sk_state_load(). Should be used in contexts where
|
|
* state change might impact lockless readers.
|
|
*/
|
|
void inet_sk_state_store(struct sock *sk, int newstate);
|
|
|
|
void inet_sk_set_state(struct sock *sk, int state);
|
|
|
|
/* Established-hash function over the connection 4-tuple: both ports are
 * packed into a single u32 so jhash_3words() can mix the whole tuple.
 * @initval salts the hash (per-netns secret) to resist hash-flood attacks.
 */
static inline unsigned int __inet_ehashfn(const __be32 laddr,
					  const __u16 lport,
					  const __be32 faddr,
					  const __be16 fport,
					  u32 initval)
{
	return jhash_3words((__force __u32) laddr,
			    (__force __u32) faddr,
			    ((__u32) lport) << 16 | (__force __u32)fport,
			    initval);
}
|
|
|
|
struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
|
|
struct sock *sk_listener,
|
|
bool attach_listener);
|
|
|
|
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
|
|
{
|
|
__u8 flags = 0;
|
|
|
|
if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
|
|
flags |= FLOWI_FLAG_ANYSRC;
|
|
return flags;
|
|
}
|
|
|
|
static inline void inet_inc_convert_csum(struct sock *sk)
|
|
{
|
|
inet_sk(sk)->convert_csum++;
|
|
}
|
|
|
|
static inline void inet_dec_convert_csum(struct sock *sk)
|
|
{
|
|
if (inet_sk(sk)->convert_csum > 0)
|
|
inet_sk(sk)->convert_csum--;
|
|
}
|
|
|
|
static inline bool inet_get_convert_csum(struct sock *sk)
|
|
{
|
|
return !!inet_sk(sk)->convert_csum;
|
|
}
|
|
|
|
|
|
static inline bool inet_can_nonlocal_bind(struct net *net,
|
|
struct inet_sock *inet)
|
|
{
|
|
return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) ||
|
|
inet->freebind || inet->transparent;
|
|
}
|
|
|
|
static inline bool inet_addr_valid_or_nonlocal(struct net *net,
|
|
struct inet_sock *inet,
|
|
__be32 addr,
|
|
int addr_type)
|
|
{
|
|
return inet_can_nonlocal_bind(net, inet) ||
|
|
addr == htonl(INADDR_ANY) ||
|
|
addr_type == RTN_LOCAL ||
|
|
addr_type == RTN_MULTICAST ||
|
|
addr_type == RTN_BROADCAST;
|
|
}
|
|
|
|
#endif /* _INET_SOCK_H */
|