/*
 * IPv6 specific functions of netfilter core
 *
 * Rusty Russell (C) 2000  -- This code is GPL.
 * Patrick McHardy (C) 2006-2012
 */
2005-08-10 06:39:00 +04:00
# include <linux/kernel.h>
2006-01-10 03:43:13 +03:00
# include <linux/init.h>
2005-08-10 06:39:00 +04:00
# include <linux/ipv6.h>
2005-08-10 06:42:34 +04:00
# include <linux/netfilter.h>
# include <linux/netfilter_ipv6.h>
2011-07-15 19:47:34 +04:00
# include <linux/export.h>
2013-05-17 07:56:10 +04:00
# include <net/addrconf.h>
2005-08-10 06:39:00 +04:00
# include <net/dst.h>
# include <net/ipv6.h>
# include <net/ip6_route.h>
2006-01-07 10:04:54 +03:00
# include <net/xfrm.h>
2007-12-05 12:24:48 +03:00
# include <net/netfilter/nf_queue.h>
2019-05-29 14:25:38 +03:00
# include <net/netfilter/nf_conntrack_bridge.h>
# include <net/netfilter/ipv6/nf_defrag_ipv6.h>
# include "../bridge/br_private.h"
2005-08-10 06:39:00 +04:00
2015-09-25 23:07:31 +03:00
/* Redo the routing decision for @skb, typically after netfilter has
 * rewritten addresses or the mark (see nf_ip6_reroute, which calls this
 * when daddr/saddr/mark changed on LOCAL_OUT).
 *
 * Looks up a fresh route for the packet's current header, replaces the
 * skb's dst with it, re-runs the XFRM lookup when the packet has not
 * already been transformed, and expands the headroom if the (possibly
 * new) output device needs a larger hard header.
 *
 * Returns 0 on success or a negative errno (route lookup error, XFRM
 * error, or -ENOMEM from headroom expansion).
 */
int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	/* Underlying full socket, if skb->sk is a request/timewait sock. */
	struct sock *sk = sk_to_full_sk(skb->sk);
	unsigned int hh_len;
	struct dst_entry *dst;
	/* Multicast/link-local destinations need an interface-strict lookup. */
	int strict = (ipv6_addr_type(&iph->daddr) &
		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
	struct flowi6 fl6 = {
		/* Prefer the socket's bound device; for strict lookups fall
		 * back to the current dst's device, else leave unspecified.
		 */
		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
			strict ? skb_dst(skb)->dev->ifindex : 0,
		.flowi6_mark = skb->mark,
		.flowi6_uid = sock_net_uid(net, sk),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
	};
	int err;

	dst = ip6_route_output(net, sk, &fl6);
	err = dst->error;
	if (err) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
		dst_release(dst);
		return err;
	}

	/* Drop old route. */
	skb_dst_drop(skb);

	skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
	/* Re-resolve IPsec policy unless the packet was already transformed. */
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		/* Detach dst before xfrm_lookup(), which consumes the ref. */
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);
2017-11-28 00:50:26 +03:00
static int nf_ip6_reroute ( struct sk_buff * skb ,
2007-12-05 12:26:33 +03:00
const struct nf_queue_entry * entry )
2005-08-10 06:42:34 +04:00
{
2007-12-05 12:26:33 +03:00
struct ip6_rt_info * rt_info = nf_queue_entry_reroute ( entry ) ;
2005-08-10 06:42:34 +04:00
2015-04-03 23:31:01 +03:00
if ( entry - > state . hook = = NF_INET_LOCAL_OUT ) {
2011-04-22 08:53:02 +04:00
const struct ipv6hdr * iph = ipv6_hdr ( skb ) ;
2005-08-10 06:42:34 +04:00
if ( ! ipv6_addr_equal ( & iph - > daddr , & rt_info - > daddr ) | |
2008-11-25 14:18:11 +03:00
! ipv6_addr_equal ( & iph - > saddr , & rt_info - > saddr ) | |
skb - > mark ! = rt_info - > mark )
2017-11-28 00:50:26 +03:00
return ip6_route_me_harder ( entry - > state . net , skb ) ;
2005-08-10 06:42:34 +04:00
}
return 0 ;
}
2019-02-02 12:17:00 +03:00
/* Route lookup helper for the netfilter core: resolve fl->u.ip6 to a
 * dst_entry.  On success stores the dst (reference held by the caller)
 * in *dst and returns 0; on failure releases the erroneous dst and
 * returns its negative error.  @strict forces an interface-bound lookup.
 */
int __nf_ip6_route(struct net *net, struct dst_entry **dst,
		   struct flowi *fl, bool strict)
{
	static const struct ipv6_pinfo fake_pinfo;
	static const struct inet_sock fake_sk = {
		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
		.sk.sk_bound_dev_if = 1,
		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
	};
	const void *sk = strict ? &fake_sk : NULL;
	struct dst_entry *rt;
	int err;

	rt = ip6_route_output(net, sk, &fl->u.ip6);
	err = rt->error;
	if (!err) {
		*dst = rt;
		return 0;
	}

	dst_release(rt);
	return err;
}
EXPORT_SYMBOL_GPL(__nf_ip6_route);
2007-12-05 12:22:05 +03:00
2019-05-29 14:25:38 +03:00
/* Refragment an IPv6 skb that bridge conntrack reassembled, so the
 * packets leaving the bridge resemble the fragments that entered it
 * (frag_max_size caps the emitted fragment size).  Structured like
 * ip6_fragment(): a fast path that reuses an existing frag list and a
 * slow path that builds each fragment from scratch.
 *
 * Each resulting fragment is passed to @output together with @data.
 * Returns 0 on success or when the packet is dropped ("blackhole"),
 * otherwise the negative error from @output.  Ownership of @skb is
 * always taken: it is consumed, freed, or handed to @output.
 */
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		    struct nf_ct_bridge_frag_data *data,
		    int (*output)(struct net *, struct sock *sk,
				  const struct nf_ct_bridge_frag_data *data,
				  struct sk_buff *))
{
	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
	struct ip6_frag_state state;
	u8 *prevhdr, nexthdr = 0;
	unsigned int mtu, hlen;
	int hroom, err = 0;
	__be32 frag_id;

	/* Locate the place where the fragment header must be inserted. */
	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto blackhole;
	hlen = err;
	nexthdr = *prevhdr;

	/* Sanity-check the recorded fragment size against the device MTU
	 * and the IPv6 minimum MTU; out-of-range values drop the packet.
	 */
	mtu = skb->dev->mtu;
	if (frag_max_size > mtu ||
	    frag_max_size < IPV6_MIN_MTU)
		goto blackhole;

	mtu = frag_max_size;
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto blackhole;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	/* Checksum must be finalized before the payload is split up. */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto blackhole;

	hroom = LL_RESERVED_SPACE(skb->dev);
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		/* Fast path: reuse the existing frag list, provided every
		 * piece fits the MTU, has headroom for the fragment header,
		 * and is exclusively owned.
		 */
		if (first_len - hlen > mtu ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto blackhole;

		if (skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag2) {
			if (frag2->len > mtu ||
			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto blackhole;

			/* Partially cloned skb? */
			if (skb_shared(frag2))
				goto slow_path;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto blackhole;

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			err = output(net, sk, data, skb);
			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		kfree(iter.tmp_hdr);
		if (!err)
			return 0;

		/* Output failed mid-list: the unsent fragments are still
		 * ours to free.
		 */
		kfree_skb_list(iter.frag);
		return err;
	}
slow_path:
	/* This is a linearized skbuff, the original geometry is lost for us.
	 * This may also be a clone skbuff, we could preserve the geometry for
	 * the copies but probably not worth the effort.
	 */
	ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
		      LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
		      &state);

	while (state.left > 0) {
		struct sk_buff *skb2;

		skb2 = ip6_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto blackhole;
		}

		err = output(net, sk, data, skb2);
		if (err)
			goto blackhole;
	}
	consume_skb(skb);
	return err;

blackhole:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_ip6_fragment);
2013-05-17 07:56:10 +04:00
/* Indirection table letting netfilter core (and bridge conntrack) reach
 * IPv6 helpers without a hard link-time dependency on the ipv6 module.
 * Entries guarded by IS_MODULE(CONFIG_IPV6) are only needed when ipv6
 * is modular; with ipv6 built in, callers can use those symbols
 * directly.  Published via nf_ipv6_ops in ipv6_netfilter_init().
 */
static const struct nf_ipv6_ops ipv6ops = {
#if IS_MODULE(CONFIG_IPV6)
	.chk_addr = ipv6_chk_addr,
	.route_me_harder = ip6_route_me_harder,
	.dev_get_saddr = ipv6_dev_get_saddr,
	.route = __nf_ip6_route,
#if IS_ENABLED(CONFIG_SYN_COOKIES)
	.cookie_init_sequence = __cookie_v6_init_sequence,
	.cookie_v6_check = __cookie_v6_check,
#endif
#endif
	.route_input = ip6_route_input,
	.fragment = ip6_fragment,
	.reroute = nf_ip6_reroute,
#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	.br_defrag = nf_ct_frag6_gather,
#endif
#if IS_MODULE(CONFIG_IPV6)
	.br_fragment = br_ip6_fragment,
#endif
};
2005-08-10 06:42:34 +04:00
/* Publish the IPv6 ops table for the netfilter core.  RCU_INIT_POINTER
 * suffices because ipv6ops is a static const object fully initialized
 * at compile time, so readers cannot observe a half-built structure.
 * Always returns 0.
 */
int __init ipv6_netfilter_init(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
	return 0;
}
2006-01-11 08:02:21 +03:00
/* Unpublish the IPv6 ops table so netfilter core falls back to its
 * no-ipv6 behavior.
 *
 * This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
}