/*
 * IPv6 library code, needed by static components when full IPv6 support is
 * not configured or static. These functions are needed by GSO/GRO
 * implementation.
 */
#include <linux/export.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/addrconf.h>
#include <net/secure_seq.h>
#include <linux/netfilter.h>
2015-03-25 19:07:45 +03:00
static u32 __ipv6_select_ident ( struct net * net , u32 hashrnd ,
2015-05-23 06:55:57 +03:00
const struct in6_addr * dst ,
const struct in6_addr * src )
2015-02-04 00:36:15 +03:00
{
u32 hash , id ;
hash = __ipv6_addr_jhash ( dst , hashrnd ) ;
hash = __ipv6_addr_jhash ( src , hash ) ;
2015-03-25 19:07:45 +03:00
hash ^ = net_hash_mix ( net ) ;
2015-02-04 00:36:15 +03:00
/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
* set the hight order instead thus minimizing possible future
* collisions .
*/
id = ip_idents_reserve ( hash , 1 ) ;
if ( unlikely ( ! id ) )
id = 1 < < 31 ;
return id ;
}
2014-10-30 21:27:17 +03:00
/* This function exists only for tap drivers that must support broken
* clients requesting UFO without specifying an IPv6 fragment ID .
*
* This is similar to ipv6_select_ident ( ) but we use an independent hash
* seed to limit information leakage .
*
* The network header must be set before calling this .
*/
2015-03-25 19:07:45 +03:00
void ipv6_proxy_select_ident ( struct net * net , struct sk_buff * skb )
2014-10-30 21:27:17 +03:00
{
static u32 ip6_proxy_idents_hashrnd __read_mostly ;
struct in6_addr buf [ 2 ] ;
struct in6_addr * addrs ;
2015-02-04 00:36:15 +03:00
u32 id ;
2014-10-30 21:27:17 +03:00
addrs = skb_header_pointer ( skb ,
skb_network_offset ( skb ) +
offsetof ( struct ipv6hdr , saddr ) ,
sizeof ( buf ) , buf ) ;
if ( ! addrs )
return ;
net_get_random_once ( & ip6_proxy_idents_hashrnd ,
sizeof ( ip6_proxy_idents_hashrnd ) ) ;
2015-03-25 19:07:45 +03:00
id = __ipv6_select_ident ( net , ip6_proxy_idents_hashrnd ,
2015-02-04 00:36:15 +03:00
& addrs [ 1 ] , & addrs [ 0 ] ) ;
2015-02-09 17:38:20 +03:00
skb_shinfo ( skb ) - > ip6_frag_id = htonl ( id ) ;
2014-10-30 21:27:17 +03:00
}
EXPORT_SYMBOL_GPL ( ipv6_proxy_select_ident ) ;
2015-05-26 02:02:21 +03:00
__be32 ipv6_select_ident ( struct net * net ,
const struct in6_addr * daddr ,
const struct in6_addr * saddr )
2015-02-04 00:36:15 +03:00
{
static u32 ip6_idents_hashrnd __read_mostly ;
u32 id ;
net_get_random_once ( & ip6_idents_hashrnd , sizeof ( ip6_idents_hashrnd ) ) ;
2015-05-23 06:55:57 +03:00
id = __ipv6_select_ident ( net , ip6_idents_hashrnd , daddr , saddr ) ;
2015-05-23 06:55:56 +03:00
return htonl ( id ) ;
2015-02-04 00:36:15 +03:00
}
EXPORT_SYMBOL ( ipv6_select_ident ) ;
2012-11-15 12:49:20 +04:00
int ip6_find_1stfragopt ( struct sk_buff * skb , u8 * * nexthdr )
{
u16 offset = sizeof ( struct ipv6hdr ) ;
struct ipv6_opt_hdr * exthdr =
( struct ipv6_opt_hdr * ) ( ipv6_hdr ( skb ) + 1 ) ;
2013-05-29 00:34:26 +04:00
unsigned int packet_len = skb_tail_pointer ( skb ) -
skb_network_header ( skb ) ;
2012-11-15 12:49:20 +04:00
int found_rhdr = 0 ;
* nexthdr = & ipv6_hdr ( skb ) - > nexthdr ;
while ( offset + 1 < = packet_len ) {
switch ( * * nexthdr ) {
case NEXTHDR_HOP :
break ;
case NEXTHDR_ROUTING :
found_rhdr = 1 ;
break ;
case NEXTHDR_DEST :
# if IS_ENABLED(CONFIG_IPV6_MIP6)
if ( ipv6_find_tlv ( skb , offset , IPV6_TLV_HAO ) > = 0 )
break ;
# endif
if ( found_rhdr )
return offset ;
break ;
2014-08-25 00:53:10 +04:00
default :
2012-11-15 12:49:20 +04:00
return offset ;
}
offset + = ipv6_optlen ( exthdr ) ;
* nexthdr = & exthdr - > nexthdr ;
exthdr = ( struct ipv6_opt_hdr * ) ( skb_network_header ( skb ) +
offset ) ;
}
return offset ;
}
EXPORT_SYMBOL ( ip6_find_1stfragopt ) ;
#if IS_ENABLED(CONFIG_IPV6)
/* Return the hop limit for packets sent via @dst: the route's
 * RTAX_HOPLIMIT metric when set, otherwise the egress device's
 * configured limit, falling back to the netns-wide default.
 */
int ip6_dst_hoplimit(struct dst_entry *dst)
{
	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);

	if (hoplimit == 0) {
		struct net_device *dev = dst->dev;
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		if (idev)
			hoplimit = idev->cnf.hop_limit;
		else
			hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
		rcu_read_unlock();
	}
	return hoplimit;
}
EXPORT_SYMBOL(ip6_dst_hoplimit);
#endif
2013-08-31 09:44:29 +04:00
2015-04-06 05:19:09 +03:00
static int __ip6_local_out_sk ( struct sock * sk , struct sk_buff * skb )
2013-08-31 09:44:29 +04:00
{
2015-09-16 04:04:16 +03:00
struct net * net = dev_net ( skb_dst ( skb ) - > dev ) ;
2013-08-31 09:44:29 +04:00
int len ;
len = skb - > len - sizeof ( struct ipv6hdr ) ;
if ( len > IPV6_MAXPLEN )
len = 0 ;
ipv6_hdr ( skb ) - > payload_len = htons ( len ) ;
2014-06-09 08:37:25 +04:00
IP6CB ( skb ) - > nhoff = offsetof ( struct ipv6hdr , nexthdr ) ;
2013-08-31 09:44:29 +04:00
2015-09-16 04:04:16 +03:00
return nf_hook ( NFPROTO_IPV6 , NF_INET_LOCAL_OUT ,
net , sk , skb , NULL , skb_dst ( skb ) - > dev ,
2015-09-16 04:04:18 +03:00
dst_output_okfn ) ;
2013-08-31 09:44:29 +04:00
}
2015-04-06 05:19:09 +03:00
int __ip6_local_out ( struct sk_buff * skb )
{
return __ip6_local_out_sk ( skb - > sk , skb ) ;
}
2013-08-31 09:44:29 +04:00
EXPORT_SYMBOL_GPL ( __ip6_local_out ) ;
/* Emit a locally generated packet: run the LOCAL_OUT hook and, unless
 * the hook stole or dropped the skb (return value != 1), hand it to
 * dst_output().
 */
int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
{
	int err = __ip6_local_out_sk(sk, skb);

	if (likely(err == 1))
		err = dst_output(sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out_sk);
int ip6_local_out ( struct sk_buff * skb )
{
return ip6_local_out_sk ( skb - > sk , skb ) ;
}
2013-08-31 09:44:29 +04:00
EXPORT_SYMBOL_GPL ( ip6_local_out ) ;