/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/addrconf.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif

static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
					    skb->nh.ipv6h->saddr.s6_addr32,
					    skb->h.th->dest,
					    skb->h.th->source);
}

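/*
 * Active open: validate the destination address, divert v4-mapped
 * destinations to tcp_v4_connect(), resolve the route and source
 * address, pick the initial sequence number and send the SYN.
 */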
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

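/*
 * ICMPv6 error handler.  Locates the socket the error refers to,
 * handles ICMPV6_PKT_TOOBIG by re-syncing the MSS to the new path MTU,
 * and reports other errors to the socket or to a pending request_sock.
 */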
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

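/*
 * Answer a connection request with a SYN+ACK: build the flow (honouring
 * any saved routing header), let tcp_make_synack() construct the
 * segment, checksum it and transmit it with ip6_xmit().
 */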
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}

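/*
 * TCP MD5 signature support (RFC 2385): per-destination key management
 * and signature calculation/verification for the IPv6 family.
 */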
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
			return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp6_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = (struct tcp6_md5sig_key *) tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
		}
		tcp_alloc_md5sig_pool();
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof(tp->md5sig_info->keys6[0]) *
					(tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof(tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;

				tcp_free_md5sig_pool();
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i + 1],
						(tp->md5sig_info->entries6 - i)
						* sizeof(tp->md5sig_info->keys6[0]));
			}
			/* the key was removed, report success */
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

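/*
 * Compute the RFC 2385 MD5 digest over the IPv6 pseudo-header, the TCP
 * header (with its checksum zeroed), the payload and the shared key,
 * and store the 16-byte result in md5_hash.
 */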
static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   struct tcphdr *th, int protocol,
				   int tcplen)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__u16 cksum;
	struct tcp_md5sig_pool *hp;
	struct tcp6_pseudohdr *bp;
	struct hash_desc *desc;
	int err;
	unsigned int nbytes = 0;

	hp = tcp_get_md5sig_pool();
	if (!hp) {
		printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
		goto clear_hash_noput;
	}
	bp = &hp->md5_blk.ip6;
	desc = &hp->md5_desc;

	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->len = htonl(tcplen);
	bp->protocol = htonl(protocol);

	sg_set_buf(&sg[block++], bp, sizeof(*bp));
	nbytes += sizeof(*bp);

	/* 2. TCP header, excluding options */
	cksum = th->check;
	th->check = 0;
	sg_set_buf(&sg[block++], th, sizeof(*th));
	nbytes += sizeof(*th);

	/* 3. TCP segment data (if any) */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		u8 *data = (u8 *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. shared key */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	/* Now store the hash into the packet */
	err = crypto_hash_init(desc);
	if (err) {
		printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
		goto clear_hash;
	}
	err = crypto_hash_update(desc, sg, nbytes);
	if (err) {
		printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
		goto clear_hash;
	}
	err = crypto_hash_final(desc, md5_hash);
	if (err) {
		printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
		goto clear_hash;
	}

	/* Reset header, and free up the crypto */
	tcp_put_md5sig_pool();
	th->check = cksum;
out:
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	goto out;
}

static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				struct sock *sk,
				struct dst_entry *dst,
				struct request_sock *req,
				struct tcphdr *th, int protocol,
				int tcplen)
{
	struct in6_addr *saddr, *daddr;

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	}
	return tcp_v6_do_calc_md5_hash(md5_hash, key,
				       saddr, daddr,
				       th, protocol, tcplen);
}

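/*
 * Check the MD5 signature option of an incoming segment against the key
 * configured for the peer; returns non-zero if the segment should be
 * dropped.
 */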
static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = skb->nh.ipv6h;
	struct tcphdr *th = skb->h.th;
	int length = (th->doff << 2) - sizeof(*th);
	int genhash;
	u8 *ptr;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);

	/* If the TCP option is too short, we can short cut */
	if (length < TCPOLEN_MD5SIG)
		return hash_expected ? 1 : 0;

	/* parse options */
	ptr = (u8 *)(th + 1);
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			goto done_opts;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				goto done_opts;
			if (opcode == TCPOPT_MD5SIG) {
				hash_location = ptr;
				goto done_opts;
			}
		}
		ptr += opsize - 2;
		length -= opsize;
	}

done_opts:
	/* do we have a hash as expected? */
	if (!hash_expected) {
		if (!hash_location)
			return 0;
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash NOT expected but found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	if (!hash_location) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash expected but NOT found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_do_calc_md5_hash(newhash,
					  hash_expected,
					  &ip6h->saddr, &ip6h->daddr,
					  th, sk->sk_protocol,
					  skb->len);
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       genhash ? "failed" : "mismatch",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
#endif
};

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	=	sizeof(struct tcp6_timewait_sock),
	.twsk_unique	=	tcp_twsk_unique,
	.twsk_destructor =	tcp_twsk_destructor,
};

static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff << 2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = skb->nh.ipv6h;
	th = skb->h.th;

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

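/*
 * Send a RST in reply to an offending segment.  The reply is built from
 * the incoming packet itself (no established socket is needed) and is
 * signed with an MD5 option when a matching key exists.
 */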
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(*th);
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
	else
		key = NULL;

	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff << 2));
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		u32 *opt = (u32 *)(t1 + 1);
		opt[0] = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		tcp_v6_do_calc_md5_hash((__u8 *)&opt[1],
					key,
					&skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr,
					t1, IPPROTO_TCP,
					tot_len);
	}
#endif

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

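/*
 * Send a bare ACK, used for TIME-WAIT sockets and for acknowledging
 * connection requests; it may carry timestamp and MD5 signature
 * options.
 */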
static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
			    struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);
	__be32 *topt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_key tw_key;
#endif

#ifdef CONFIG_TCP_MD5SIG
	if (!tw && skb->sk) {
		key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
	} else if (tw && tw->tw_md5_keylen) {
		tw_key.key = tw->tw_md5_key;
		tw_key.keylen = tw->tw_md5_keylen;
		key = &tw_key;
	} else {
		key = NULL;
	}
#endif

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_do_calc_md5_hash((__u8 *)topt,
					key,
					&skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr,
					t1, IPPROTO_TCP,
					tot_len);
	}
#endif

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}

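/*
 * For a segment arriving on a listening socket, find the matching
 * half-open request (and validate it with tcp_check_req()) or an
 * already established connection in the hash tables.
 */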
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &skb->nh.ipv6h->saddr,
				   &skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

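/*
 * Process an incoming SYN on a listening socket: allocate a
 * request_sock, record the peer addresses and options, pick the ISN and
 * answer with a SYN+ACK.
 */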
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 *	There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
ipv6_addr_type ( & treq - > rmt_addr ) & IPV6_ADDR_LINKLOCAL )
2005-08-12 16:19:38 +04:00
treq - > iif = inet6_iif ( skb ) ;
2005-04-17 02:20:36 +04:00
if ( isn = = 0 )
2006-11-11 01:06:49 +03:00
isn = tcp_v6_init_sequence ( skb ) ;
2005-04-17 02:20:36 +04:00
2005-06-19 09:46:52 +04:00
tcp_rsk ( req ) - > snt_isn = isn ;
2005-04-17 02:20:36 +04:00
2006-07-25 10:32:50 +04:00
security_inet_conn_request ( sk , skb , req ) ;
2005-04-17 02:20:36 +04:00
if ( tcp_v6_send_synack ( sk , req , NULL ) )
goto drop ;
2005-12-14 10:15:24 +03:00
inet6_csk_reqsk_queue_hash_add ( sk , req , TCP_TIMEOUT_INIT ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
drop :
if ( req )
2005-06-19 09:47:21 +04:00
reqsk_free ( req ) ;
2005-04-17 02:20:36 +04:00
return 0 ; /* don't send reset */
}
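The iif handling above records the interface a link-local SYN arrived on, since
an fe80::/10 address only has meaning on a particular link. The same constraint
is visible from userspace: connecting to a link-local peer needs an explicit
scope id. A minimal sketch, assuming standard BSD socket APIs; fe80::1, eth0
and the port number are placeholders.
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>

int connect_link_local(const char *ll_addr, const char *ifname, uint16_t port)
{
	struct sockaddr_in6 sa;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.sin6_family = AF_INET6;
	sa.sin6_port = htons(port);
	if (inet_pton(AF_INET6, ll_addr, &sa.sin6_addr) != 1) {
		close(fd);
		return -1;
	}
	sa.sin6_scope_id = if_nametoindex(ifname);  /* which link is meant */

	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("connect");
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	/* fe80::1 and eth0 are placeholders for a real neighbour and interface */
	int fd = connect_link_local("fe80::1", "eth0", 9999);
	if (fd >= 0)
		close(fd);
	return 0;
}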
static struct sock * tcp_v6_syn_recv_sock ( struct sock * sk , struct sk_buff * skb ,
2005-06-19 09:47:21 +04:00
struct request_sock * req ,
2005-04-17 02:20:36 +04:00
struct dst_entry * dst )
{
2005-12-14 10:15:40 +03:00
struct inet6_request_sock * treq = inet6_rsk ( req ) ;
2005-04-17 02:20:36 +04:00
struct ipv6_pinfo * newnp , * np = inet6_sk ( sk ) ;
struct tcp6_sock * newtcp6sk ;
struct inet_sock * newinet ;
struct tcp_sock * newtp ;
struct sock * newsk ;
struct ipv6_txoptions * opt ;
2006-11-15 06:07:45 +03:00
# ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key * key ;
# endif
2005-04-17 02:20:36 +04:00
if ( skb - > protocol = = htons ( ETH_P_IP ) ) {
/*
* v6 mapped
*/
newsk = tcp_v4_syn_recv_sock ( sk , skb , req , dst ) ;
if ( newsk = = NULL )
return NULL ;
newtcp6sk = ( struct tcp6_sock * ) newsk ;
inet_sk ( newsk ) - > pinet6 = & newtcp6sk - > inet6 ;
newinet = inet_sk ( newsk ) ;
newnp = inet6_sk ( newsk ) ;
newtp = tcp_sk ( newsk ) ;
memcpy ( newnp , np , sizeof ( struct ipv6_pinfo ) ) ;
ipv6_addr_set ( & newnp - > daddr , 0 , 0 , htonl ( 0x0000FFFF ) ,
newinet - > daddr ) ;
ipv6_addr_set ( & newnp - > saddr , 0 , 0 , htonl ( 0x0000FFFF ) ,
newinet - > saddr ) ;
ipv6_addr_copy ( & newnp - > rcv_saddr , & newnp - > saddr ) ;
2005-12-14 10:15:52 +03:00
inet_csk ( newsk ) - > icsk_af_ops = & ipv6_mapped ;
2005-04-17 02:20:36 +04:00
newsk - > sk_backlog_rcv = tcp_v4_do_rcv ;
2006-11-15 06:07:45 +03:00
# ifdef CONFIG_TCP_MD5SIG
newtp - > af_specific = & tcp_sock_ipv6_mapped_specific ;
# endif
2005-04-17 02:20:36 +04:00
newnp - > pktoptions = NULL ;
newnp - > opt = NULL ;
2005-08-12 16:19:38 +04:00
newnp - > mcast_oif = inet6_iif ( skb ) ;
2005-04-17 02:20:36 +04:00
newnp - > mcast_hops = skb - > nh . ipv6h - > hop_limit ;
2005-08-10 06:45:38 +04:00
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
* here , tcp_create_openreq_child now does this for us , see the comment in
* that function for the gory details . - acme
2005-04-17 02:20:36 +04:00
*/
/* It is a tricky place. Until this moment IPv4 tcp
2005-12-14 10:15:52 +03:00
worked with IPv6 icsk . icsk_af_ops .
2005-04-17 02:20:36 +04:00
Sync it now .
*/
2005-12-14 10:26:10 +03:00
tcp_sync_mss ( newsk , inet_csk ( newsk ) - > icsk_pmtu_cookie ) ;
2005-04-17 02:20:36 +04:00
return newsk ;
}
opt = np - > opt ;
if ( sk_acceptq_is_full ( sk ) )
goto out_overflow ;
[IPV6]: Support several new sockopt / ancillary data in Advanced API (RFC3542).
Support several new socket options / ancillary data:
IPV6_RECVPKTINFO, IPV6_PKTINFO,
IPV6_RECVHOPOPTS, IPV6_HOPOPTS,
IPV6_RECVDSTOPTS, IPV6_DSTOPTS, IPV6_RTHDRDSTOPTS,
IPV6_RECVRTHDR, IPV6_RTHDR
Old semantics are preserved as IPV6_2292xxxx so that
we can maintain backward compatibility.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
2005-09-08 04:59:17 +04:00
if ( np - > rxopt . bits . osrcrt = = 2 & &
2005-06-19 09:46:52 +04:00
opt = = NULL & & treq - > pktopts ) {
struct inet6_skb_parm * rxopt = IP6CB ( treq - > pktopts ) ;
2005-04-17 02:20:36 +04:00
if ( rxopt - > srcrt )
2005-06-19 09:46:52 +04:00
opt = ipv6_invert_rthdr ( sk , ( struct ipv6_rt_hdr * ) ( treq - > pktopts - > nh . raw + rxopt - > srcrt ) ) ;
2005-04-17 02:20:36 +04:00
}
if ( dst = = NULL ) {
struct in6_addr * final_p = NULL , final ;
struct flowi fl ;
memset ( & fl , 0 , sizeof ( fl ) ) ;
fl . proto = IPPROTO_TCP ;
2005-06-19 09:46:52 +04:00
ipv6_addr_copy ( & fl . fl6_dst , & treq - > rmt_addr ) ;
2005-04-17 02:20:36 +04:00
if ( opt & & opt - > srcrt ) {
struct rt0_hdr * rt0 = ( struct rt0_hdr * ) opt - > srcrt ;
ipv6_addr_copy ( & final , & fl . fl6_dst ) ;
ipv6_addr_copy ( & fl . fl6_dst , rt0 - > addr ) ;
final_p = & final ;
}
2005-06-19 09:46:52 +04:00
ipv6_addr_copy ( & fl . fl6_src , & treq - > loc_addr ) ;
2005-04-17 02:20:36 +04:00
fl . oif = sk - > sk_bound_dev_if ;
2005-06-19 09:46:52 +04:00
fl . fl_ip_dport = inet_rsk ( req ) - > rmt_port ;
2005-04-17 02:20:36 +04:00
fl . fl_ip_sport = inet_sk ( sk ) - > sport ;
2006-07-25 10:32:50 +04:00
security_req_classify_flow ( req , & fl ) ;
2005-04-17 02:20:36 +04:00
if ( ip6_dst_lookup ( sk , & dst , & fl ) )
goto out ;
if ( final_p )
ipv6_addr_copy ( & fl . fl6_dst , final_p ) ;
if ( ( xfrm_lookup ( & dst , & fl , sk , 0 ) ) < 0 )
goto out ;
}
newsk = tcp_create_openreq_child ( sk , req , skb ) ;
if ( newsk = = NULL )
goto out ;
2005-08-10 06:45:38 +04:00
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
* count here , tcp_create_openreq_child now does this for us , see the
* comment in that function for the gory details . - acme
*/
2005-04-17 02:20:36 +04:00
2006-08-26 02:55:43 +04:00
newsk - > sk_gso_type = SKB_GSO_TCPV6 ;
2006-08-30 04:15:09 +04:00
__ip6_dst_store ( newsk , dst , NULL , NULL ) ;
2005-04-17 02:20:36 +04:00
newtcp6sk = ( struct tcp6_sock * ) newsk ;
inet_sk ( newsk ) - > pinet6 = & newtcp6sk - > inet6 ;
newtp = tcp_sk ( newsk ) ;
newinet = inet_sk ( newsk ) ;
newnp = inet6_sk ( newsk ) ;
memcpy ( newnp , np , sizeof ( struct ipv6_pinfo ) ) ;
2005-06-19 09:46:52 +04:00
ipv6_addr_copy ( & newnp - > daddr , & treq - > rmt_addr ) ;
ipv6_addr_copy ( & newnp - > saddr , & treq - > loc_addr ) ;
ipv6_addr_copy ( & newnp - > rcv_saddr , & treq - > loc_addr ) ;
newsk - > sk_bound_dev_if = treq - > iif ;
2005-04-17 02:20:36 +04:00
/* Now IPv6 options...
First : no IPv4 options .
*/
newinet - > opt = NULL ;
/* Clone RX bits */
newnp - > rxopt . all = np - > rxopt . all ;
/* Clone pktoptions received with SYN */
newnp - > pktoptions = NULL ;
2005-06-19 09:46:52 +04:00
if ( treq - > pktopts ! = NULL ) {
newnp - > pktoptions = skb_clone ( treq - > pktopts , GFP_ATOMIC ) ;
kfree_skb ( treq - > pktopts ) ;
treq - > pktopts = NULL ;
2005-04-17 02:20:36 +04:00
if ( newnp - > pktoptions )
skb_set_owner_r ( newnp - > pktoptions , newsk ) ;
}
newnp - > opt = NULL ;
2005-08-12 16:19:38 +04:00
newnp - > mcast_oif = inet6_iif ( skb ) ;
2005-04-17 02:20:36 +04:00
newnp - > mcast_hops = skb - > nh . ipv6h - > hop_limit ;
/* Clone native IPv6 options from listening socket (if any)
Yes , keeping reference count would be much more clever ,
but we do one more thing here : reattach optmem
to newsk .
*/
if ( opt ) {
newnp - > opt = ipv6_dup_options ( newsk , opt ) ;
if ( opt ! = np - > opt )
sock_kfree_s ( sk , opt , opt - > tot_len ) ;
}
2005-12-14 10:26:10 +03:00
inet_csk ( newsk ) - > icsk_ext_hdr_len = 0 ;
2005-04-17 02:20:36 +04:00
if ( newnp - > opt )
2005-12-14 10:26:10 +03:00
inet_csk ( newsk ) - > icsk_ext_hdr_len = ( newnp - > opt - > opt_nflen +
newnp - > opt - > opt_flen ) ;
2005-04-17 02:20:36 +04:00
2006-03-21 04:53:41 +03:00
tcp_mtup_init ( newsk ) ;
2005-04-17 02:20:36 +04:00
tcp_sync_mss ( newsk , dst_mtu ( dst ) ) ;
newtp - > advmss = dst_metric ( dst , RTAX_ADVMSS ) ;
tcp_initialize_rcv_mss ( newsk ) ;
newinet - > daddr = newinet - > saddr = newinet - > rcv_saddr = LOOPBACK4_IPV6 ;
2006-11-15 06:07:45 +03:00
# ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
if ( ( key = tcp_v6_md5_do_lookup ( sk , & newnp - > daddr ) ) ! = NULL ) {
/* We're using one, so create a matching key
* on the newsk structure . If we fail to get
* memory , then we end up not copying the key
* across . Shucks .
*/
2006-11-17 17:14:37 +03:00
char * newkey = kmemdup ( key - > key , key - > keylen , GFP_ATOMIC ) ;
if ( newkey ! = NULL )
2006-11-15 06:07:45 +03:00
tcp_v6_md5_do_add ( newsk , & inet6_sk ( sk ) - > daddr ,
newkey , key - > keylen ) ;
}
# endif
2005-12-14 10:15:01 +03:00
__inet6_hash ( & tcp_hashinfo , newsk ) ;
2005-08-10 07:07:13 +04:00
inet_inherit_port ( & tcp_hashinfo , sk , newsk ) ;
2005-04-17 02:20:36 +04:00
return newsk ;
out_overflow :
NET_INC_STATS_BH ( LINUX_MIB_LISTENOVERFLOWS ) ;
out :
NET_INC_STATS_BH ( LINUX_MIB_LISTENDROPS ) ;
if ( opt & & opt ! = np - > opt )
sock_kfree_s ( sk , opt , opt - > tot_len ) ;
dst_release ( dst ) ;
return NULL ;
}
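The v6-mapped branch above lets tcp_v4_syn_recv_sock create the child socket
and then rewrites its addresses into the ::ffff:a.b.c.d form. A small standalone
sketch of the same mapping as seen from userspace; 192.0.2.1 is a placeholder
address.
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
	struct in_addr v4;
	struct in6_addr mapped;
	char buf[INET6_ADDRSTRLEN];

	if (inet_pton(AF_INET, "192.0.2.1", &v4) != 1)
		return 1;

	/* Same layout as ipv6_addr_set(&a, 0, 0, htonl(0x0000FFFF), v4addr): */
	memset(&mapped, 0, sizeof(mapped));
	mapped.s6_addr[10] = 0xff;
	mapped.s6_addr[11] = 0xff;
	memcpy(&mapped.s6_addr[12], &v4, 4);

	inet_ntop(AF_INET6, &mapped, buf, sizeof(buf));
	printf("%s\n", buf);    /* prints ::ffff:192.0.2.1 */
	return 0;
}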
2006-11-15 08:40:42 +03:00
static __sum16 tcp_v6_checksum_init ( struct sk_buff * skb )
2005-04-17 02:20:36 +04:00
{
2006-08-30 03:44:56 +04:00
if ( skb - > ip_summed = = CHECKSUM_COMPLETE ) {
2005-04-17 02:20:36 +04:00
if ( ! tcp_v6_check ( skb - > h . th , skb - > len , & skb - > nh . ipv6h - > saddr ,
2005-11-11 00:01:24 +03:00
& skb - > nh . ipv6h - > daddr , skb - > csum ) ) {
skb - > ip_summed = CHECKSUM_UNNECESSARY ;
2005-04-17 02:20:36 +04:00
return 0 ;
2005-11-11 00:01:24 +03:00
}
2005-04-17 02:20:36 +04:00
}
2005-11-11 00:01:24 +03:00
2006-11-15 08:35:48 +03:00
skb - > csum = ~ csum_unfold ( tcp_v6_check ( skb - > h . th , skb - > len , & skb - > nh . ipv6h - > saddr ,
& skb - > nh . ipv6h - > daddr , 0 ) ) ;
2005-11-11 00:01:24 +03:00
2005-04-17 02:20:36 +04:00
if ( skb - > len < = 76 ) {
2005-11-11 00:01:24 +03:00
return __skb_checksum_complete ( skb ) ;
2005-04-17 02:20:36 +04:00
}
return 0 ;
}
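tcp_v6_checksum_init above either accepts a hardware-verified checksum or sets
things up for software verification over the IPv6 pseudo-header. For reference,
a standalone sketch of that checksum (RFC 2460 section 8.1: pseudo-header
followed by the TCP segment with its checksum field zeroed); it is an
illustration, not the kernel's csum helpers.
#include <stdint.h>
#include <stddef.h>

/* Fold a 32-bit accumulator of 16-bit words into a ones'-complement sum. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Sum a buffer as big-endian 16-bit words (the wire format). */
static uint32_t csum_add(uint32_t sum, const uint8_t *data, size_t len)
{
	size_t i;
	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)                    /* pad an odd trailing byte with zero */
		sum += (uint32_t)data[len - 1] << 8;
	return sum;
}

/* TCP-over-IPv6 checksum: pseudo-header (src, dst, upper-layer length,
 * next header) followed by the TCP header and payload, whose checksum
 * field the caller must have zeroed. */
uint16_t tcp6_checksum(const uint8_t src[16], const uint8_t dst[16],
		       const uint8_t *tcp, size_t tcp_len)
{
	uint32_t sum = 0;
	uint8_t pseudo_tail[8] = { 0 };

	pseudo_tail[0] = (uint8_t)(tcp_len >> 24);
	pseudo_tail[1] = (uint8_t)(tcp_len >> 16);
	pseudo_tail[2] = (uint8_t)(tcp_len >> 8);
	pseudo_tail[3] = (uint8_t)tcp_len;
	pseudo_tail[7] = 6;             /* next header = TCP */

	sum = csum_add(sum, src, 16);
	sum = csum_add(sum, dst, 16);
	sum = csum_add(sum, pseudo_tail, 8);
	sum = csum_add(sum, tcp, tcp_len);
	return (uint16_t)~csum_fold(sum);
}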
/* The socket must have its spinlock held when we get
* here .
*
* We have a potential double - lock case here , so even when
* doing backlog processing we use the BH locking scheme .
* This is because we cannot sleep with the original spinlock
* held .
*/
static int tcp_v6_do_rcv ( struct sock * sk , struct sk_buff * skb )
{
struct ipv6_pinfo * np = inet6_sk ( sk ) ;
struct tcp_sock * tp ;
struct sk_buff * opt_skb = NULL ;
/* Imagine: socket is IPv6. IPv4 packet arrives,
goes to the IPv4 receive handler and is backlogged .
From backlog it always goes here . Kerboom . . .
Fortunately , tcp_rcv_established and rcv_established
handle them correctly , but that is not the case with
tcp_v6_hnd_req and tcp_v6_send_reset ( ) . - - ANK
*/
if ( skb - > protocol = = htons ( ETH_P_IP ) )
return tcp_v4_do_rcv ( sk , skb ) ;
2006-11-15 06:07:45 +03:00
# ifdef CONFIG_TCP_MD5SIG
if ( tcp_v6_inbound_md5_hash ( sk , skb ) )
goto discard ;
# endif
2006-09-01 02:28:39 +04:00
if ( sk_filter ( sk , skb ) )
2005-04-17 02:20:36 +04:00
goto discard ;
/*
* socket locking is here for SMP purposes as backlog rcv
* is currently called with bh processing disabled .
*/
/* Do Stevens' IPV6_PKTOPTIONS.
Yes , guys , it is the only place in our code , where we
can do it without affecting IPv4 .
The rest of code is protocol independent ,
and I do not like idea to uglify IPv4 .
Actually , the whole idea behind IPV6_PKTOPTIONS
does not look very well thought out . For now we latch
options , received in the last packet , enqueued
by tcp . Feel free to propose better solution .
- - ANK ( 980728 )
*/
if ( np - > rxopt . all )
opt_skb = skb_clone ( skb , GFP_ATOMIC ) ;
if ( sk - > sk_state = = TCP_ESTABLISHED ) { /* Fast path */
TCP_CHECK_TIMER ( sk ) ;
if ( tcp_rcv_established ( sk , skb , skb - > h . th , skb - > len ) )
goto reset ;
TCP_CHECK_TIMER ( sk ) ;
if ( opt_skb )
goto ipv6_pktoptions ;
return 0 ;
}
if ( skb - > len < ( skb - > h . th - > doff < < 2 ) | | tcp_checksum_complete ( skb ) )
goto csum_err ;
if ( sk - > sk_state = = TCP_LISTEN ) {
struct sock * nsk = tcp_v6_hnd_req ( sk , skb ) ;
if ( ! nsk )
goto discard ;
/*
* Queue it on the new socket if the new socket is active ,
* otherwise we just shortcircuit this and continue with
* the new socket . .
*/
if ( nsk ! = sk ) {
if ( tcp_child_process ( sk , nsk , skb ) )
goto reset ;
if ( opt_skb )
__kfree_skb ( opt_skb ) ;
return 0 ;
}
}
TCP_CHECK_TIMER ( sk ) ;
if ( tcp_rcv_state_process ( sk , skb , skb - > h . th , skb - > len ) )
goto reset ;
TCP_CHECK_TIMER ( sk ) ;
if ( opt_skb )
goto ipv6_pktoptions ;
return 0 ;
reset :
2006-11-15 06:07:45 +03:00
tcp_v6_send_reset ( sk , skb ) ;
2005-04-17 02:20:36 +04:00
discard :
if ( opt_skb )
__kfree_skb ( opt_skb ) ;
kfree_skb ( skb ) ;
return 0 ;
csum_err :
TCP_INC_STATS_BH ( TCP_MIB_INERRS ) ;
goto discard ;
ipv6_pktoptions :
/* Do you ask, what is it?
1. skb was enqueued by tcp .
2. skb is added to tail of read queue , rather than out of order .
3. socket is not in passive state .
4. Finally , it really contains options , which user wants to receive .
*/
tp = tcp_sk ( sk ) ;
if ( TCP_SKB_CB ( opt_skb ) - > end_seq = = tp - > rcv_nxt & &
! ( ( 1 < < sk - > sk_state ) & ( TCPF_CLOSE | TCPF_LISTEN ) ) ) {
2005-09-08 04:59:17 +04:00
if ( np - > rxopt . bits . rxinfo | | np - > rxopt . bits . rxoinfo )
2005-08-12 16:19:38 +04:00
np - > mcast_oif = inet6_iif ( opt_skb ) ;
2005-09-08 04:59:17 +04:00
if ( np - > rxopt . bits . rxhlim | | np - > rxopt . bits . rxohlim )
2005-04-17 02:20:36 +04:00
np - > mcast_hops = opt_skb - > nh . ipv6h - > hop_limit ;
if ( ipv6_opt_accepted ( sk , opt_skb ) ) {
skb_set_owner_r ( opt_skb , sk ) ;
opt_skb = xchg ( & np - > pktoptions , opt_skb ) ;
} else {
__kfree_skb ( opt_skb ) ;
opt_skb = xchg ( & np - > pktoptions , NULL ) ;
}
}
if ( opt_skb )
kfree_skb ( opt_skb ) ;
return 0 ;
}
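The ipv6_pktoptions path above latches the options carried by the most recent
segment so a TCP user can fetch them later. On a datagram socket the same
interest bits that np->rxopt tracks surface directly as RFC 3542 ancillary
data; a minimal userspace sketch, assuming glibc exposes struct in6_pktinfo
under _GNU_SOURCE, with the port number a placeholder.
#define _GNU_SOURCE             /* for struct in6_pktinfo in glibc headers */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int on = 1;
	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
				   .sin6_port = htons(5000) };
	char payload[2048], cbuf[256];
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf,
			      .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *cmsg;

	if (fd < 0)
		return 1;
	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	/* Walk the ancillary data delivered with this one datagram. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IPV6 &&
		    cmsg->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo *pi =
				(struct in6_pktinfo *)CMSG_DATA(cmsg);
			printf("arrived on ifindex %u\n", pi->ipi6_ifindex);
		}
	}
	return 0;
}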
2006-01-07 10:02:34 +03:00
static int tcp_v6_rcv ( struct sk_buff * * pskb )
2005-04-17 02:20:36 +04:00
{
struct sk_buff * skb = * pskb ;
struct tcphdr * th ;
struct sock * sk ;
int ret ;
if ( skb - > pkt_type ! = PACKET_HOST )
goto discard_it ;
/*
* Count it even if it ' s bad .
*/
TCP_INC_STATS_BH ( TCP_MIB_INSEGS ) ;
if ( ! pskb_may_pull ( skb , sizeof ( struct tcphdr ) ) )
goto discard_it ;
th = skb - > h . th ;
if ( th - > doff < sizeof ( struct tcphdr ) / 4 )
goto bad_packet ;
if ( ! pskb_may_pull ( skb , th - > doff * 4 ) )
goto discard_it ;
if ( ( skb - > ip_summed ! = CHECKSUM_UNNECESSARY & &
2005-11-11 00:01:24 +03:00
tcp_v6_checksum_init ( skb ) ) )
2005-04-17 02:20:36 +04:00
goto bad_packet ;
th = skb - > h . th ;
TCP_SKB_CB ( skb ) - > seq = ntohl ( th - > seq ) ;
TCP_SKB_CB ( skb ) - > end_seq = ( TCP_SKB_CB ( skb ) - > seq + th - > syn + th - > fin +
skb - > len - th - > doff * 4 ) ;
TCP_SKB_CB ( skb ) - > ack_seq = ntohl ( th - > ack_seq ) ;
TCP_SKB_CB ( skb ) - > when = 0 ;
TCP_SKB_CB ( skb ) - > flags = ipv6_get_dsfield ( skb - > nh . ipv6h ) ;
TCP_SKB_CB ( skb ) - > sacked = 0 ;
2005-08-12 16:19:38 +04:00
sk = __inet6_lookup ( & tcp_hashinfo , & skb - > nh . ipv6h - > saddr , th - > source ,
& skb - > nh . ipv6h - > daddr , ntohs ( th - > dest ) ,
inet6_iif ( skb ) ) ;
2005-04-17 02:20:36 +04:00
if ( ! sk )
goto no_tcp_socket ;
process :
if ( sk - > sk_state = = TCP_TIME_WAIT )
goto do_time_wait ;
if ( ! xfrm6_policy_check ( sk , XFRM_POLICY_IN , skb ) )
goto discard_and_relse ;
2006-09-01 02:28:39 +04:00
if ( sk_filter ( sk , skb ) )
2005-04-17 02:20:36 +04:00
goto discard_and_relse ;
skb - > dev = NULL ;
2006-09-26 09:28:47 +04:00
bh_lock_sock_nested ( sk ) ;
2005-04-17 02:20:36 +04:00
ret = 0 ;
if ( ! sock_owned_by_user ( sk ) ) {
2006-05-24 05:05:53 +04:00
# ifdef CONFIG_NET_DMA
struct tcp_sock * tp = tcp_sk ( sk ) ;
if ( tp - > ucopy . dma_chan )
ret = tcp_v6_do_rcv ( sk , skb ) ;
else
# endif
{
if ( ! tcp_prequeue ( sk , skb ) )
ret = tcp_v6_do_rcv ( sk , skb ) ;
}
2005-04-17 02:20:36 +04:00
} else
sk_add_backlog ( sk , skb ) ;
bh_unlock_sock ( sk ) ;
sock_put ( sk ) ;
return ret ? - 1 : 0 ;
no_tcp_socket :
if ( ! xfrm6_policy_check ( NULL , XFRM_POLICY_IN , skb ) )
goto discard_it ;
if ( skb - > len < ( th - > doff < < 2 ) | | tcp_checksum_complete ( skb ) ) {
bad_packet :
TCP_INC_STATS_BH ( TCP_MIB_INERRS ) ;
} else {
2006-11-15 06:07:45 +03:00
tcp_v6_send_reset ( NULL , skb ) ;
2005-04-17 02:20:36 +04:00
}
discard_it :
/*
* Discard frame
*/
kfree_skb ( skb ) ;
return 0 ;
discard_and_relse :
sock_put ( sk ) ;
goto discard_it ;
do_time_wait :
if ( ! xfrm6_policy_check ( NULL , XFRM_POLICY_IN , skb ) ) {
2006-10-11 06:41:46 +04:00
inet_twsk_put ( inet_twsk ( sk ) ) ;
2005-04-17 02:20:36 +04:00
goto discard_it ;
}
if ( skb - > len < ( th - > doff < < 2 ) | | tcp_checksum_complete ( skb ) ) {
TCP_INC_STATS_BH ( TCP_MIB_INERRS ) ;
2006-10-11 06:41:46 +04:00
inet_twsk_put ( inet_twsk ( sk ) ) ;
2005-04-17 02:20:36 +04:00
goto discard_it ;
}
2006-10-11 06:41:46 +04:00
switch ( tcp_timewait_state_process ( inet_twsk ( sk ) , skb , th ) ) {
2005-04-17 02:20:36 +04:00
case TCP_TW_SYN :
{
struct sock * sk2 ;
2005-08-12 16:19:38 +04:00
sk2 = inet6_lookup_listener ( & tcp_hashinfo ,
& skb - > nh . ipv6h - > daddr ,
ntohs ( th - > dest ) , inet6_iif ( skb ) ) ;
2005-04-17 02:20:36 +04:00
if ( sk2 ! = NULL ) {
2005-08-10 07:44:40 +04:00
struct inet_timewait_sock * tw = inet_twsk ( sk ) ;
inet_twsk_deschedule ( tw , & tcp_death_row ) ;
inet_twsk_put ( tw ) ;
2005-04-17 02:20:36 +04:00
sk = sk2 ;
goto process ;
}
/* Fall through to ACK */
}
case TCP_TW_ACK :
tcp_v6_timewait_ack ( sk , skb ) ;
break ;
case TCP_TW_RST :
goto no_tcp_socket ;
case TCP_TW_SUCCESS : ;
}
goto discard_it ;
}
static int tcp_v6_remember_stamp ( struct sock * sk )
{
/* Alas, not yet... */
return 0 ;
}
2005-12-14 10:15:52 +03:00
static struct inet_connection_sock_af_ops ipv6_specific = {
2006-03-21 09:48:35 +03:00
. queue_xmit = inet6_csk_xmit ,
. send_check = tcp_v6_send_check ,
. rebuild_header = inet6_sk_rebuild_header ,
. conn_request = tcp_v6_conn_request ,
. syn_recv_sock = tcp_v6_syn_recv_sock ,
. remember_stamp = tcp_v6_remember_stamp ,
. net_header_len = sizeof ( struct ipv6hdr ) ,
. setsockopt = ipv6_setsockopt ,
. getsockopt = ipv6_getsockopt ,
. addr2sockaddr = inet6_csk_addr2sockaddr ,
. sockaddr_len = sizeof ( struct sockaddr_in6 ) ,
2006-03-21 09:45:21 +03:00
# ifdef CONFIG_COMPAT
2006-03-21 09:48:35 +03:00
. compat_setsockopt = compat_ipv6_setsockopt ,
. compat_getsockopt = compat_ipv6_getsockopt ,
2006-03-21 09:45:21 +03:00
# endif
2005-04-17 02:20:36 +04:00
} ;
2006-11-15 06:07:45 +03:00
# ifdef CONFIG_TCP_MD5SIG
2006-11-15 06:53:22 +03:00
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
2006-11-15 06:07:45 +03:00
. md5_lookup = tcp_v6_md5_lookup ,
. calc_md5_hash = tcp_v6_calc_md5_hash ,
. md5_add = tcp_v6_md5_add_func ,
. md5_parse = tcp_v6_parse_md5_keys ,
} ;
2006-11-15 06:53:22 +03:00
# endif
2006-11-15 06:07:45 +03:00
2005-04-17 02:20:36 +04:00
/*
* TCP over IPv4 via INET6 API
*/
2005-12-14 10:15:52 +03:00
static struct inet_connection_sock_af_ops ipv6_mapped = {
2006-03-21 09:48:35 +03:00
. queue_xmit = ip_queue_xmit ,
. send_check = tcp_v4_send_check ,
. rebuild_header = inet_sk_rebuild_header ,
. conn_request = tcp_v6_conn_request ,
. syn_recv_sock = tcp_v6_syn_recv_sock ,
. remember_stamp = tcp_v4_remember_stamp ,
. net_header_len = sizeof ( struct iphdr ) ,
. setsockopt = ipv6_setsockopt ,
. getsockopt = ipv6_getsockopt ,
. addr2sockaddr = inet6_csk_addr2sockaddr ,
. sockaddr_len = sizeof ( struct sockaddr_in6 ) ,
2006-03-21 09:45:21 +03:00
# ifdef CONFIG_COMPAT
2006-03-21 09:48:35 +03:00
. compat_setsockopt = compat_ipv6_setsockopt ,
. compat_getsockopt = compat_ipv6_getsockopt ,
2006-03-21 09:45:21 +03:00
# endif
2005-04-17 02:20:36 +04:00
} ;
2006-11-15 06:07:45 +03:00
# ifdef CONFIG_TCP_MD5SIG
2006-11-15 06:53:22 +03:00
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
2006-11-15 06:07:45 +03:00
. md5_lookup = tcp_v4_md5_lookup ,
. calc_md5_hash = tcp_v4_calc_md5_hash ,
. md5_add = tcp_v6_md5_add_func ,
. md5_parse = tcp_v6_parse_md5_keys ,
} ;
2006-11-15 06:53:22 +03:00
# endif
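The two tables above pick the MD5 helpers per address family. From userspace
the corresponding RFC 2385 key is installed with the TCP_MD5SIG socket option
before connect() or listen(). A minimal sketch, assuming the Linux uapi header
<linux/tcp.h> provides struct tcp_md5sig; the peer address and key are
placeholders.
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/tcp.h>          /* struct tcp_md5sig, TCP_MD5SIG */

int add_md5_key(int fd, const char *peer6, const void *key, int keylen)
{
	struct tcp_md5sig md5;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin6->sin6_family = AF_INET6;
	if (inet_pton(AF_INET6, peer6, &sin6->sin6_addr) != 1)
		return -1;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	/* Segments to/from peer6 must now carry a matching MD5 option. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}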
2006-11-15 06:07:45 +03:00
2005-04-17 02:20:36 +04:00
/* NOTE: A lot of things are set to zero explicitly by the call to
* sk_alloc ( ) so they need not be done here .
*/
static int tcp_v6_init_sock ( struct sock * sk )
{
2005-08-10 11:03:31 +04:00
struct inet_connection_sock * icsk = inet_csk ( sk ) ;
2005-04-17 02:20:36 +04:00
struct tcp_sock * tp = tcp_sk ( sk ) ;
skb_queue_head_init ( & tp - > out_of_order_queue ) ;
tcp_init_xmit_timers ( sk ) ;
tcp_prequeue_init ( tp ) ;
2005-08-10 11:03:31 +04:00
icsk - > icsk_rto = TCP_TIMEOUT_INIT ;
2005-04-17 02:20:36 +04:00
tp - > mdev = TCP_TIMEOUT_INIT ;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed - ACK and congestion control
* algorithms that we must have the following bandaid to talk
* efficiently to them . - DaveM
*/
tp - > snd_cwnd = 2 ;
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values .
*/
tp - > snd_ssthresh = 0x7fffffff ;
tp - > snd_cwnd_clamp = ~ 0 ;
2005-07-06 02:24:38 +04:00
tp - > mss_cache = 536 ;
2005-04-17 02:20:36 +04:00
tp - > reordering = sysctl_tcp_reordering ;
sk - > sk_state = TCP_CLOSE ;
2005-12-14 10:15:52 +03:00
icsk - > icsk_af_ops = & ipv6_specific ;
2005-08-10 11:03:31 +04:00
icsk - > icsk_ca_ops = & tcp_init_congestion_ops ;
2005-12-14 10:26:10 +03:00
icsk - > icsk_sync_mss = tcp_sync_mss ;
2005-04-17 02:20:36 +04:00
sk - > sk_write_space = sk_stream_write_space ;
sock_set_flag ( sk , SOCK_USE_WRITE_QUEUE ) ;
2006-11-15 06:07:45 +03:00
# ifdef CONFIG_TCP_MD5SIG
tp - > af_specific = & tcp_sock_ipv6_specific ;
# endif
2005-04-17 02:20:36 +04:00
sk - > sk_sndbuf = sysctl_tcp_wmem [ 1 ] ;
sk - > sk_rcvbuf = sysctl_tcp_rmem [ 1 ] ;
atomic_inc ( & tcp_sockets_allocated ) ;
return 0 ;
}
static int tcp_v6_destroy_sock ( struct sock * sk )
{
2006-11-15 06:07:45 +03:00
# ifdef CONFIG_TCP_MD5SIG
/* Clean up the MD5 key list */
if ( tcp_sk ( sk ) - > md5sig_info )
tcp_v6_clear_md5_list ( sk ) ;
# endif
2005-04-17 02:20:36 +04:00
tcp_v4_destroy_sock ( sk ) ;
return inet6_destroy_sock ( sk ) ;
}
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6 ( struct seq_file * seq ,
2005-06-19 09:47:21 +04:00
struct sock * sk , struct request_sock * req , int i , int uid )
2005-04-17 02:20:36 +04:00
{
int ttd = req - > expires - jiffies ;
2005-12-14 10:15:40 +03:00
struct in6_addr * src = & inet6_rsk ( req ) - > loc_addr ;
struct in6_addr * dest = & inet6_rsk ( req ) - > rmt_addr ;
2005-04-17 02:20:36 +04:00
if ( ttd < 0 )
ttd = 0 ;
seq_printf ( seq ,
" %4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p \n " ,
i ,
src - > s6_addr32 [ 0 ] , src - > s6_addr32 [ 1 ] ,
src - > s6_addr32 [ 2 ] , src - > s6_addr32 [ 3 ] ,
ntohs ( inet_sk ( sk ) - > sport ) ,
dest - > s6_addr32 [ 0 ] , dest - > s6_addr32 [ 1 ] ,
dest - > s6_addr32 [ 2 ] , dest - > s6_addr32 [ 3 ] ,
2005-06-19 09:46:52 +04:00
ntohs ( inet_rsk ( req ) - > rmt_port ) ,
2005-04-17 02:20:36 +04:00
TCP_SYN_RECV ,
0 , 0 , /* could print option size, but that is af dependent. */
1 , /* timers active (only the expire timer) */
jiffies_to_clock_t ( ttd ) ,
req - > retrans ,
uid ,
0 , /* non standard timer */
0 , /* open_requests have no inode */
0 , req ) ;
}
static void get_tcp6_sock ( struct seq_file * seq , struct sock * sp , int i )
{
struct in6_addr * dest , * src ;
__u16 destp , srcp ;
int timer_active ;
unsigned long timer_expires ;
struct inet_sock * inet = inet_sk ( sp ) ;
struct tcp_sock * tp = tcp_sk ( sp ) ;
2005-08-10 07:10:42 +04:00
const struct inet_connection_sock * icsk = inet_csk ( sp ) ;
2005-04-17 02:20:36 +04:00
struct ipv6_pinfo * np = inet6_sk ( sp ) ;
dest = & np - > daddr ;
src = & np - > rcv_saddr ;
destp = ntohs ( inet - > dport ) ;
srcp = ntohs ( inet - > sport ) ;
2005-08-10 07:10:42 +04:00
if ( icsk - > icsk_pending = = ICSK_TIME_RETRANS ) {
2005-04-17 02:20:36 +04:00
timer_active = 1 ;
2005-08-10 07:10:42 +04:00
timer_expires = icsk - > icsk_timeout ;
} else if ( icsk - > icsk_pending = = ICSK_TIME_PROBE0 ) {
2005-04-17 02:20:36 +04:00
timer_active = 4 ;
2005-08-10 07:10:42 +04:00
timer_expires = icsk - > icsk_timeout ;
2005-04-17 02:20:36 +04:00
} else if ( timer_pending ( & sp - > sk_timer ) ) {
timer_active = 2 ;
timer_expires = sp - > sk_timer . expires ;
} else {
timer_active = 0 ;
timer_expires = jiffies ;
}
seq_printf ( seq ,
" %4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d \n " ,
i ,
src - > s6_addr32 [ 0 ] , src - > s6_addr32 [ 1 ] ,
src - > s6_addr32 [ 2 ] , src - > s6_addr32 [ 3 ] , srcp ,
dest - > s6_addr32 [ 0 ] , dest - > s6_addr32 [ 1 ] ,
dest - > s6_addr32 [ 2 ] , dest - > s6_addr32 [ 3 ] , destp ,
sp - > sk_state ,
2006-06-28 00:29:00 +04:00
tp - > write_seq - tp - > snd_una ,
( sp - > sk_state = = TCP_LISTEN ) ? sp - > sk_ack_backlog : ( tp - > rcv_nxt - tp - > copied_seq ) ,
2005-04-17 02:20:36 +04:00
timer_active ,
jiffies_to_clock_t ( timer_expires - jiffies ) ,
2005-08-10 07:10:42 +04:00
icsk - > icsk_retransmits ,
2005-04-17 02:20:36 +04:00
sock_i_uid ( sp ) ,
2005-08-10 11:03:31 +04:00
icsk - > icsk_probes_out ,
2005-04-17 02:20:36 +04:00
sock_i_ino ( sp ) ,
atomic_read ( & sp - > sk_refcnt ) , sp ,
2005-08-10 07:10:42 +04:00
icsk - > icsk_rto ,
icsk - > icsk_ack . ato ,
( icsk - > icsk_ack . quick < < 1 ) | icsk - > icsk_ack . pingpong ,
2005-04-17 02:20:36 +04:00
tp - > snd_cwnd , tp - > snd_ssthresh > = 0xFFFF ? - 1 : tp - > snd_ssthresh
) ;
}
static void get_timewait6_sock ( struct seq_file * seq ,
2005-08-10 07:09:30 +04:00
struct inet_timewait_sock * tw , int i )
2005-04-17 02:20:36 +04:00
{
struct in6_addr * dest , * src ;
__u16 destp , srcp ;
2005-12-14 10:23:09 +03:00
struct inet6_timewait_sock * tw6 = inet6_twsk ( ( struct sock * ) tw ) ;
2005-04-17 02:20:36 +04:00
int ttd = tw - > tw_ttd - jiffies ;
if ( ttd < 0 )
ttd = 0 ;
2005-12-14 10:23:09 +03:00
dest = & tw6 - > tw_v6_daddr ;
src = & tw6 - > tw_v6_rcv_saddr ;
2005-04-17 02:20:36 +04:00
destp = ntohs ( tw - > tw_dport ) ;
srcp = ntohs ( tw - > tw_sport ) ;
seq_printf ( seq ,
" %4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p \n " ,
i ,
src - > s6_addr32 [ 0 ] , src - > s6_addr32 [ 1 ] ,
src - > s6_addr32 [ 2 ] , src - > s6_addr32 [ 3 ] , srcp ,
dest - > s6_addr32 [ 0 ] , dest - > s6_addr32 [ 1 ] ,
dest - > s6_addr32 [ 2 ] , dest - > s6_addr32 [ 3 ] , destp ,
tw - > tw_substate , 0 , 0 ,
3 , jiffies_to_clock_t ( ttd ) , 0 , 0 , 0 , 0 ,
atomic_read ( & tw - > tw_refcnt ) , tw ) ;
}
# ifdef CONFIG_PROC_FS
static int tcp6_seq_show ( struct seq_file * seq , void * v )
{
struct tcp_iter_state * st ;
if ( v = = SEQ_START_TOKEN ) {
seq_puts ( seq ,
" sl "
" local_address "
" remote_address "
" st tx_queue rx_queue tr tm->when retrnsmt "
" uid timeout inode \n " ) ;
goto out ;
}
st = seq - > private ;
switch ( st - > state ) {
case TCP_SEQ_STATE_LISTENING :
case TCP_SEQ_STATE_ESTABLISHED :
get_tcp6_sock ( seq , v , st - > num ) ;
break ;
case TCP_SEQ_STATE_OPENREQ :
get_openreq6 ( seq , st - > syn_wait_sk , v , st - > num , st - > uid ) ;
break ;
case TCP_SEQ_STATE_TIME_WAIT :
get_timewait6_sock ( seq , v , st - > num ) ;
break ;
}
out :
return 0 ;
}
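The seq_file output above prints each address as four %08X words, i.e. in host
byte order, with the port in hex after the colon, so a reader of /proc/net/tcp6
has to undo that encoding. A minimal userspace sketch of parsing the local
address column; error handling is kept deliberately small.
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static void print_addr_port(const char *field)  /* "xxxxxxxx...:PPPP" */
{
	struct in6_addr addr;
	char buf[INET6_ADDRSTRLEN];
	unsigned int w, port;
	int i;

	for (i = 0; i < 4; i++) {
		sscanf(field + 8 * i, "%8x", &w);
		/* storing the host-order value reproduces the original bytes */
		memcpy(&addr.s6_addr[4 * i], &w, 4);
	}
	sscanf(field + 33, "%x", &port);        /* 32 hex chars + ':' */
	inet_ntop(AF_INET6, &addr, buf, sizeof(buf));
	printf("[%s]:%u\n", buf, port);
}

int main(void)
{
	char line[512], local[64], remote[64];
	int sl;
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);           /* skip the header line */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, " %d: %63s %63s", &sl, local, remote) == 3)
			print_addr_port(local);
	}
	fclose(f);
	return 0;
}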
static struct file_operations tcp6_seq_fops ;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
. owner = THIS_MODULE ,
. name = " tcp6 " ,
. family = AF_INET6 ,
. seq_show = tcp6_seq_show ,
. seq_fops = & tcp6_seq_fops ,
} ;
int __init tcp6_proc_init ( void )
{
return tcp_proc_register ( & tcp6_seq_afinfo ) ;
}
void tcp6_proc_exit ( void )
{
tcp_proc_unregister ( & tcp6_seq_afinfo ) ;
}
# endif
struct proto tcpv6_prot = {
. name = " TCPv6 " ,
. owner = THIS_MODULE ,
. close = tcp_close ,
. connect = tcp_v6_connect ,
. disconnect = tcp_disconnect ,
2005-08-10 07:10:42 +04:00
. accept = inet_csk_accept ,
2005-04-17 02:20:36 +04:00
. ioctl = tcp_ioctl ,
. init = tcp_v6_init_sock ,
. destroy = tcp_v6_destroy_sock ,
. shutdown = tcp_shutdown ,
. setsockopt = tcp_setsockopt ,
. getsockopt = tcp_getsockopt ,
. sendmsg = tcp_sendmsg ,
. recvmsg = tcp_recvmsg ,
. backlog_rcv = tcp_v6_do_rcv ,
. hash = tcp_v6_hash ,
. unhash = tcp_unhash ,
. get_port = tcp_v6_get_port ,
. enter_memory_pressure = tcp_enter_memory_pressure ,
. sockets_allocated = & tcp_sockets_allocated ,
. memory_allocated = & tcp_memory_allocated ,
. memory_pressure = & tcp_memory_pressure ,
2005-08-10 07:11:41 +04:00
. orphan_count = & tcp_orphan_count ,
2005-04-17 02:20:36 +04:00
. sysctl_mem = sysctl_tcp_mem ,
. sysctl_wmem = sysctl_tcp_wmem ,
. sysctl_rmem = sysctl_tcp_rmem ,
. max_header = MAX_TCP_HEADER ,
. obj_size = sizeof ( struct tcp6_sock ) ,
2005-12-14 10:25:19 +03:00
. twsk_prot = & tcp6_timewait_sock_ops ,
2005-06-19 09:47:21 +04:00
. rsk_prot = & tcp6_request_sock_ops ,
2006-03-21 09:48:35 +03:00
# ifdef CONFIG_COMPAT
. compat_setsockopt = compat_tcp_setsockopt ,
. compat_getsockopt = compat_tcp_getsockopt ,
# endif
2005-04-17 02:20:36 +04:00
} ;
static struct inet6_protocol tcpv6_protocol = {
. handler = tcp_v6_rcv ,
. err_handler = tcp_v6_err ,
2006-07-09 00:34:56 +04:00
. gso_send_check = tcp_v6_gso_send_check ,
2006-07-01 00:36:15 +04:00
. gso_segment = tcp_tso_segment ,
2005-04-17 02:20:36 +04:00
. flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL ,
} ;
static struct inet_protosw tcpv6_protosw = {
. type = SOCK_STREAM ,
. protocol = IPPROTO_TCP ,
. prot = & tcpv6_prot ,
. ops = & inet6_stream_ops ,
. capability = - 1 ,
. no_check = 0 ,
2005-12-14 10:26:10 +03:00
. flags = INET_PROTOSW_PERMANENT |
INET_PROTOSW_ICSK ,
2005-04-17 02:20:36 +04:00
} ;
void __init tcpv6_init ( void )
{
/* register inet6 protocol */
if ( inet6_add_protocol ( & tcpv6_protocol , IPPROTO_TCP ) < 0 )
printk ( KERN_ERR " tcpv6_init: Could not register protocol \n " ) ;
inet6_register_protosw ( & tcpv6_protosw ) ;
2006-01-12 02:53:04 +03:00
2006-03-21 09:01:03 +03:00
if ( inet_csk_ctl_sock_create ( & tcp6_socket , PF_INET6 , SOCK_RAW ,
IPPROTO_TCP ) < 0 )
2006-01-12 02:53:04 +03:00
panic ( " Failed to create the TCPv6 control socket. \n " ) ;
2005-04-17 02:20:36 +04:00
}