# ifndef __NETNS_CONNTRACK_H
# define __NETNS_CONNTRACK_H
# include <linux/list.h>
# include <linux/list_nulls.h>
# include <linux/atomic.h>
# include <linux/workqueue.h>
# include <linux/netfilter/nf_conntrack_tcp.h>
# include <linux/seqlock.h>
struct ctl_table_header ;
struct nf_conntrack_ecache ;
/*
 * Per-L4-protocol state embedded in each protocol's per-netns struct:
 * the sysctl tables registered for that protocol plus a user count.
 */
struct nf_proto_net {
#ifdef CONFIG_SYSCTL
	struct ctl_table_header	*ctl_table_header;
	struct ctl_table	*ctl_table;
#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
	/* Legacy /proc/sys/net/ipv4/netfilter compatibility tables */
	struct ctl_table_header	*ctl_compat_header;
	struct ctl_table	*ctl_compat_table;
#endif
#endif
	unsigned int		users;	/* presumably a use count — verify at registration sites */
};
2012-05-28 21:04:11 +00:00
struct nf_generic_net {
struct nf_proto_net pn ;
unsigned int timeout ;
} ;
2012-05-28 21:04:12 +00:00
struct nf_tcp_net {
struct nf_proto_net pn ;
unsigned int timeouts [ TCP_CONNTRACK_TIMEOUT_MAX ] ;
unsigned int tcp_loose ;
unsigned int tcp_be_liberal ;
unsigned int tcp_max_retrans ;
} ;
/*
 * Index into nf_udp_net::timeouts, selected by whether the flow has
 * seen traffic in the reply direction yet.
 */
enum udp_conntrack {
	UDP_CT_UNREPLIED,
	UDP_CT_REPLIED,
	UDP_CT_MAX
};
/* Per-namespace UDP conntrack state: one timeout per reply state. */
struct nf_udp_net {
	struct nf_proto_net	pn;
	unsigned int		timeouts[UDP_CT_MAX];
};
2012-05-28 21:04:14 +00:00
struct nf_icmp_net {
struct nf_proto_net pn ;
unsigned int timeout ;
} ;
2012-05-28 21:04:10 +00:00
struct nf_ip_net {
2012-05-28 21:04:11 +00:00
struct nf_generic_net generic ;
2012-05-28 21:04:12 +00:00
struct nf_tcp_net tcp ;
2012-05-28 21:04:13 +00:00
struct nf_udp_net udp ;
2012-05-28 21:04:14 +00:00
struct nf_icmp_net icmp ;
2012-05-28 21:04:15 +00:00
struct nf_icmp_net icmpv6 ;
2012-05-28 21:04:10 +00:00
# if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
struct ctl_table_header * ctl_table_header ;
struct ctl_table * ctl_table ;
# endif
} ;
2014-03-03 14:45:20 +01:00
struct ct_pcpu {
spinlock_t lock ;
struct hlist_nulls_head unconfirmed ;
struct hlist_nulls_head dying ;
struct hlist_nulls_head tmpl ;
} ;
2008-10-08 11:35:02 +02:00
struct netns_ct {
2008-10-08 11:35:03 +02:00
atomic_t count ;
2008-10-08 11:35:03 +02:00
unsigned int expect_count ;
2014-06-10 23:12:56 +02:00
# ifdef CONFIG_NF_CONNTRACK_EVENTS
struct delayed_work ecache_dwork ;
bool ecache_dwork_pending ;
# endif
2013-11-15 15:57:53 +01:00
# ifdef CONFIG_SYSCTL
struct ctl_table_header * sysctl_header ;
struct ctl_table_header * acct_sysctl_header ;
struct ctl_table_header * tstamp_sysctl_header ;
struct ctl_table_header * event_sysctl_header ;
struct ctl_table_header * helper_sysctl_header ;
# endif
char * slabname ;
unsigned int sysctl_log_invalid ; /* Log invalid packets */
int sysctl_events ;
int sysctl_acct ;
int sysctl_auto_assign_helper ;
bool auto_assign_helper_warned ;
int sysctl_tstamp ;
int sysctl_checksum ;
2010-02-08 11:18:07 -08:00
unsigned int htable_size ;
2014-03-03 14:46:13 +01:00
seqcount_t generation ;
2010-02-08 11:16:56 -08:00
struct kmem_cache * nf_conntrack_cachep ;
2009-03-25 21:05:46 +01:00
struct hlist_nulls_head * hash ;
2008-10-08 11:35:03 +02:00
struct hlist_head * expect_hash ;
2014-03-03 14:45:20 +01:00
struct ct_pcpu __percpu * pcpu_lists ;
2010-02-16 15:20:26 +00:00
struct ip_conntrack_stat __percpu * stat ;
2011-11-22 00:16:51 +01:00
struct nf_ct_event_notifier __rcu * nf_conntrack_event_cb ;
struct nf_exp_event_notifier __rcu * nf_expect_event_cb ;
2012-05-28 21:04:10 +00:00
struct nf_ip_net nf_ct_proto ;
2013-01-11 06:30:44 +00:00
# if defined(CONFIG_NF_CONNTRACK_LABELS)
unsigned int labels_used ;
u8 label_words ;
# endif
2012-08-26 19:14:06 +02:00
# ifdef CONFIG_NF_NAT_NEEDED
struct hlist_head * nat_bysource ;
unsigned int nat_htable_size ;
# endif
2008-10-08 11:35:02 +02:00
} ;
# endif