// SPDX-License-Identifier: GPL-2.0

#define _GNU_SOURCE

#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <linux/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

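/*
 * IP defragmentation test: build UDP datagrams by hand as raw IPv4/IPv6
 * fragments, send them out of order (and, with -o, with an extra overlapping
 * fragment), then check whether a bound UDP socket receives the payload that
 * the kernel reassembled. See send_udp_frags() and run_test() below.
 */
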
static bool		cfg_do_ipv4;
static bool		cfg_do_ipv6;
static bool		cfg_verbose;
static bool		cfg_overlap;
static bool		cfg_permissive;
static unsigned short	cfg_port = 9000;

const struct in_addr addr4 = { .s_addr = __constant_htonl(INADDR_LOOPBACK + 2) };
const struct in6_addr addr6 = IN6ADDR_LOOPBACK_INIT;

#define IP4_HLEN	(sizeof(struct iphdr))
#define IP6_HLEN	(sizeof(struct ip6_hdr))
#define UDP_HLEN	(sizeof(struct udphdr))

/* IPv6 fragment header length. */
#define FRAG_HLEN	8

static int payload_len;
static int max_frag_len;

#define MSG_LEN_MAX	10000	/* Max UDP payload length. */

#define IP4_MF		(1u << 13)  /* IPv4 MF flag. */
#define IP6_MF		(1)  /* IPv6 MF flag. */

#define CSUM_MANGLED_0 (0xffff)

static uint8_t udp_payload[MSG_LEN_MAX];
static uint8_t ip_frame[IP_MAXPACKET];
static uint32_t ip_id = 0xabcd;
static int msg_counter;
static int frag_counter;
static unsigned int seed;

/* Receive a UDP packet. Validate it matches udp_payload. */
static void recv_validate_udp(int fd_udp)
{
	ssize_t ret;
	static uint8_t recv_buff[MSG_LEN_MAX];

	ret = recv(fd_udp, recv_buff, payload_len, 0);
	msg_counter++;

	if (cfg_overlap) {
		if (ret == -1 && (errno == ETIMEDOUT || errno == EAGAIN))
			return;  /* OK */
		if (!cfg_permissive) {
			if (ret != -1)
				error(1, 0, "recv: expected timeout; got %d",
					(int)ret);
			error(1, errno, "recv: expected timeout: %d", errno);
		}
	}

	if (ret == -1)
		error(1, errno, "recv: payload_len = %d max_frag_len = %d",
			payload_len, max_frag_len);
	if (ret != payload_len)
		error(1, 0, "recv: wrong size: %d vs %d", (int)ret, payload_len);
	if (memcmp(udp_payload, recv_buff, payload_len))
		error(1, 0, "recv: wrong data");
}

static uint32_t raw_checksum(uint8_t *buf, int len, uint32_t sum)
{
	int i;

	for (i = 0; i < (len & ~1U); i += 2) {
		sum += (u_int16_t)ntohs(*((u_int16_t *)(buf + i)));
		if (sum > 0xffff)
			sum -= 0xffff;
	}

	if (i < len) {
		sum += buf[i] << 8;
		if (sum > 0xffff)
			sum -= 0xffff;
	}

	return sum;
}

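/* UDP checksum over the IPv4 pseudo-header (source/destination address,
 * protocol and UDP length), the UDP header and udp_payload.
 */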
static uint16_t udp_checksum(struct ip *iphdr, struct udphdr *udphdr)
{
	uint32_t sum = 0;
	uint16_t res;

	sum = raw_checksum((uint8_t *)&iphdr->ip_src, 2 * sizeof(iphdr->ip_src),
				IPPROTO_UDP + (uint32_t)(UDP_HLEN + payload_len));
	sum = raw_checksum((uint8_t *)udphdr, UDP_HLEN, sum);
	sum = raw_checksum((uint8_t *)udp_payload, payload_len, sum);
	res = 0xffff & ~sum;
	if (res)
		return htons(res);
	else
		return CSUM_MANGLED_0;
}

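/* UDP checksum over the IPv6 pseudo-header; here the upper-layer length is
 * folded in from udphdr->len rather than added to the initial sum.
 */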
static uint16_t udp6_checksum(struct ip6_hdr *iphdr, struct udphdr *udphdr)
{
	uint32_t sum = 0;
	uint16_t res;

	sum = raw_checksum((uint8_t *)&iphdr->ip6_src, 2 * sizeof(iphdr->ip6_src),
				IPPROTO_UDP);
	sum = raw_checksum((uint8_t *)&udphdr->len, sizeof(udphdr->len), sum);
	sum = raw_checksum((uint8_t *)udphdr, UDP_HLEN, sum);
	sum = raw_checksum((uint8_t *)udp_payload, payload_len, sum);
	res = 0xffff & ~sum;
	if (res)
		return htons(res);
	else
		return CSUM_MANGLED_0;
}

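/* Build and send one fragment of the current datagram. offset is the byte
 * offset into the UDP datagram (header plus payload); the fragment at
 * offset 0 carries the UDP header. On the wire the fragment offset is
 * expressed in 8-byte units, with the MF (more fragments) bit set on every
 * fragment except the last.
 */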
static void send_fragment(int fd_raw, struct sockaddr *addr, socklen_t alen,
				int offset, bool ipv6)
{
	int frag_len;
	int res;
	int payload_offset = offset > 0 ? offset - UDP_HLEN : 0;
	uint8_t *frag_start = ipv6 ? ip_frame + IP6_HLEN + FRAG_HLEN :
					ip_frame + IP4_HLEN;

	if (offset == 0) {
		struct udphdr udphdr;

		udphdr.source = htons(cfg_port + 1);
		udphdr.dest = htons(cfg_port);
		udphdr.len = htons(UDP_HLEN + payload_len);
		udphdr.check = 0;
		if (ipv6)
			udphdr.check = udp6_checksum((struct ip6_hdr *)ip_frame, &udphdr);
		else
			udphdr.check = udp_checksum((struct ip *)ip_frame, &udphdr);
		memcpy(frag_start, &udphdr, UDP_HLEN);
	}

	if (ipv6) {
		struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
		struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);

		if (payload_len - payload_offset <= max_frag_len && offset > 0) {
			/* This is the last fragment. */
			frag_len = FRAG_HLEN + payload_len - payload_offset;
			fraghdr->ip6f_offlg = htons(offset);
		} else {
			frag_len = FRAG_HLEN + max_frag_len;
			fraghdr->ip6f_offlg = htons(offset | IP6_MF);
		}
		ip6hdr->ip6_plen = htons(frag_len);
		if (offset == 0)
			memcpy(frag_start + UDP_HLEN, udp_payload,
				frag_len - FRAG_HLEN - UDP_HLEN);
		else
			memcpy(frag_start, udp_payload + payload_offset,
				frag_len - FRAG_HLEN);
		frag_len += IP6_HLEN;
	} else {
		struct ip *iphdr = (struct ip *)ip_frame;

		if (payload_len - payload_offset <= max_frag_len && offset > 0) {
			/* This is the last fragment. */
			frag_len = IP4_HLEN + payload_len - payload_offset;
			iphdr->ip_off = htons(offset / 8);
		} else {
			frag_len = IP4_HLEN + max_frag_len;
			iphdr->ip_off = htons(offset / 8 | IP4_MF);
		}
		iphdr->ip_len = htons(frag_len);
		if (offset == 0)
			memcpy(frag_start + UDP_HLEN, udp_payload,
				frag_len - IP4_HLEN - UDP_HLEN);
		else
			memcpy(frag_start, udp_payload + payload_offset,
				frag_len - IP4_HLEN);
	}

	res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
	if (res < 0 && errno != EPERM)
		error(1, errno, "send_fragment");
	if (res >= 0 && res != frag_len)
		error(1, 0, "send_fragment: %d vs %d", res, frag_len);

	frag_counter++;
}

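/* Send one UDP datagram as a sequence of raw fragments. For IPv6 each
 * fragment carries a fragment extension header (FRAG_HLEN bytes, announced
 * via IPPROTO_FRAGMENT in the base header); for IPv4 the fragmentation
 * fields live in the IP header itself.
 */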
static void send_udp_frags(int fd_raw, struct sockaddr *addr,
				socklen_t alen, bool ipv6)
{
	struct ip *iphdr = (struct ip *)ip_frame;
	struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
	int res;
	int offset;
	int frag_len;

	/* Send the UDP datagram using raw IP fragments: the 0th fragment
	 * has the UDP header; other fragments are pieces of udp_payload
	 * split in chunks of frag_len size.
	 *
	 * Odd fragments (1st, 3rd, 5th, etc.) are sent out first, then
	 * even fragments (0th, 2nd, etc.) are sent out.
	 */
	if (ipv6) {
		struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);

		((struct sockaddr_in6 *)addr)->sin6_port = 0;
		memset(ip6hdr, 0, sizeof(*ip6hdr));
		ip6hdr->ip6_flow = htonl(6 << 28);  /* Version. */
		ip6hdr->ip6_nxt = IPPROTO_FRAGMENT;
		ip6hdr->ip6_hops = 255;
		ip6hdr->ip6_src = addr6;
		ip6hdr->ip6_dst = addr6;
		fraghdr->ip6f_nxt = IPPROTO_UDP;
		fraghdr->ip6f_reserved = 0;
		fraghdr->ip6f_ident = htonl(ip_id++);
	} else {
		memset(iphdr, 0, sizeof(*iphdr));
		iphdr->ip_hl = 5;
		iphdr->ip_v = 4;
		iphdr->ip_tos = 0;
		iphdr->ip_id = htons(ip_id++);
		iphdr->ip_ttl = 0x40;
		iphdr->ip_p = IPPROTO_UDP;
		iphdr->ip_src.s_addr = htonl(INADDR_LOOPBACK);
		iphdr->ip_dst = addr4;
		iphdr->ip_sum = 0;
	}

	/* Occasionally test in-order fragments. */
	if (!cfg_overlap && (rand() % 100 < 15)) {
		offset = 0;
		while (offset < (UDP_HLEN + payload_len)) {
			send_fragment(fd_raw, addr, alen, offset, ipv6);
			offset += max_frag_len;
		}
		return;
	}

	/* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
	if (!cfg_overlap && (rand() % 100 < 20) &&
			(payload_len > 9 * max_frag_len)) {
		offset = 6 * max_frag_len;
		while (offset < (UDP_HLEN + payload_len)) {
			send_fragment(fd_raw, addr, alen, offset, ipv6);
			offset += max_frag_len;
		}
		offset = 3 * max_frag_len;
		while (offset < 6 * max_frag_len) {
			send_fragment(fd_raw, addr, alen, offset, ipv6);
			offset += max_frag_len;
		}
		offset = 0;
		while (offset < 3 * max_frag_len) {
			send_fragment(fd_raw, addr, alen, offset, ipv6);
			offset += max_frag_len;
		}
		return;
	}

	/* Odd fragments. */
	offset = max_frag_len;
	while (offset < (UDP_HLEN + payload_len)) {
		send_fragment(fd_raw, addr, alen, offset, ipv6);
		/* IPv4 ignores duplicates, so randomly send a duplicate. */
		if (rand() % 100 == 1)
			send_fragment(fd_raw, addr, alen, offset, ipv6);
		offset += 2 * max_frag_len;
	}

	if (cfg_overlap) {
		/* Send an extra random fragment.
		 *
		 * Duplicates and some fragments completely inside
		 * previously sent fragments are dropped/ignored. So
		 * random offset and frag_len can result in a dropped
		 * fragment instead of a dropped queue/packet. Thus we
		 * hard-code offset and frag_len.
		 */
		if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
			/* not enough payload for random offset and frag_len. */
			offset = 8;
			frag_len = UDP_HLEN + max_frag_len;
		} else {
			offset = rand() % (payload_len / 2);
			frag_len = 2 * max_frag_len + 1 + rand() % 256;
		}
		if (ipv6) {
			struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);

			/* sendto() returns EINVAL if offset + frag_len is too small. */
			/* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
			frag_len &= ~0x7;
			fraghdr->ip6f_offlg = htons(offset / 8 | IP6_MF);
			ip6hdr->ip6_plen = htons(frag_len);
			frag_len += IP6_HLEN;
		} else {
			frag_len += IP4_HLEN;
			iphdr->ip_off = htons(offset / 8 | IP4_MF);
			iphdr->ip_len = htons(frag_len);
		}

		res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
		if (res < 0 && errno != EPERM)
			error(1, errno, "sendto overlap: %d", frag_len);
		if (res >= 0 && res != frag_len)
			error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len);
		frag_counter++;
	}

	/* Even fragments. */
	offset = 0;
	while (offset < (UDP_HLEN + payload_len)) {
		send_fragment(fd_raw, addr, alen, offset, ipv6);
		/* IPv4 ignores duplicates, so randomly send a duplicate. */
		if (rand() % 100 == 1)
			send_fragment(fd_raw, addr, alen, offset, ipv6);
		offset += 2 * max_frag_len;
	}
}

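/* One test pass for a given address family: the raw socket transmits the
 * hand-built fragments, and the bound UDP socket receives the datagram only
 * if the kernel reassembled it correctly.
 */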
static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
{
	int fd_tx_raw, fd_rx_udp;
	/* Frag queue timeout is set to one second in the calling script;
	 * socket timeout should be just a bit longer to avoid tests interfering
	 * with each other.
	 */
	struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
	int idx;
	int min_frag_len = 8;

	/* Initialize the payload. */
	for (idx = 0; idx < MSG_LEN_MAX; ++idx)
		udp_payload[idx] = idx % 256;

	/* Open sockets. */
	fd_tx_raw = socket(addr->sa_family, SOCK_RAW, IPPROTO_RAW);
	if (fd_tx_raw == -1)
		error(1, errno, "socket tx_raw");

	fd_rx_udp = socket(addr->sa_family, SOCK_DGRAM, 0);
	if (fd_rx_udp == -1)
		error(1, errno, "socket rx_udp");

	if (bind(fd_rx_udp, addr, alen))
		error(1, errno, "bind");
	/* Fail fast. */
	if (setsockopt(fd_rx_udp, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
		error(1, errno, "setsockopt rcv timeout");

	for (payload_len = min_frag_len; payload_len < MSG_LEN_MAX;
			payload_len += (rand() % 4096)) {
		if (cfg_verbose)
			printf("payload_len: %d\n", payload_len);

		if (cfg_overlap) {
			/* With overlaps, one send/receive pair below takes
			 * at least one second (== timeout) to run, so there
			 * is not enough test time to run a nested loop:
			 * the full overlap test takes 20-30 seconds.
			 */
			max_frag_len = min_frag_len +
				rand() % (1500 - FRAG_HLEN - min_frag_len);
			send_udp_frags(fd_tx_raw, addr, alen, ipv6);
			recv_validate_udp(fd_rx_udp);
		} else {
			/* Without overlaps, each packet reassembly (== one
			 * send/receive pair below) takes very little time to
			 * run, so we can easily afford more thorough testing
			 * with a nested loop: the full non-overlap test takes
			 * less than one second.
			 */
			max_frag_len = min_frag_len;
			do {
				send_udp_frags(fd_tx_raw, addr, alen, ipv6);
				recv_validate_udp(fd_rx_udp);
				max_frag_len += 8 * (rand() % 8);
			} while (max_frag_len < (1500 - FRAG_HLEN) &&
				 max_frag_len <= payload_len);
		}
	}

	/* Cleanup. */
	if (close(fd_tx_raw))
		error(1, errno, "close tx_raw");
	if (close(fd_rx_udp))
		error(1, errno, "close rx_udp");

	if (cfg_verbose)
		printf("processed %d messages, %d fragments\n",
			msg_counter, frag_counter);

	fprintf(stderr, "PASS\n");
}

static void run_test_v4(void)
{
	struct sockaddr_in addr = {0};

	addr.sin_family = AF_INET;
	addr.sin_port = htons(cfg_port);
	addr.sin_addr = addr4;

	run_test((void *)&addr, sizeof(addr), false /* !ipv6 */);
}

static void run_test_v6(void)
{
	struct sockaddr_in6 addr = {0};

	addr.sin6_family = AF_INET6;
	addr.sin6_port = htons(cfg_port);
	addr.sin6_addr = addr6;

	run_test((void *)&addr, sizeof(addr), true /* ipv6 */);
}

static void parse_opts(int argc, char **argv)
{
	int c;

	while ((c = getopt(argc, argv, "46opv")) != -1) {
		switch (c) {
		case '4':
			cfg_do_ipv4 = true;
			break;
		case '6':
			cfg_do_ipv6 = true;
			break;
		case 'o':
			cfg_overlap = true;
			break;
		case 'p':
			cfg_permissive = true;
			break;
		case 'v':
			cfg_verbose = true;
			break;
		default:
			error(1, 0, "%s: parse error", argv[0]);
		}
	}
}

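/* Flags (see parse_opts() above): -4 run the IPv4 tests, -6 run the IPv6
 * tests, -o send an extra overlapping fragment and expect the receive to
 * time out, -p permissive mode (a reassembled datagram delivered despite
 * the overlap is validated instead of treated as a failure), -v verbose.
 */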
int main(int argc, char **argv)
{
	parse_opts(argc, argv);
	seed = time(NULL);
	srand(seed);
	/* Print the seed to track/reproduce potential failures. */
	printf("seed = %d\n", seed);

	if (cfg_do_ipv4)
		run_test_v4();
	if (cfg_do_ipv6)
		run_test_v6();

	return 0;
}