/*
 * NET		Generic infrastructure for Network protocols.
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <net/request_sock.h>

/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * The minimum value of it is 128. Experiments with real servers show that
 * it is absolutely not enough even at 100 conn/sec. 256 cures most
 * of the problems.
 * This value is adjusted to 128 for low-memory machines,
 * and it will increase in proportion to the memory of the machine.
 * Note: don't forget somaxconn, which may limit the backlog too.
 */
int sysctl_max_syn_backlog = 256;
EXPORT_SYMBOL(sysctl_max_syn_backlog);

int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt;

	/* Clamp the table size to [8, sysctl_max_syn_backlog] and round it
	 * up to a power of two.
	 */
	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	lopt_size += nr_table_entries * sizeof(struct request_sock *);

	/* The SYN hash table is allocated together with struct listen_sock;
	 * fall back to vmalloc() once it no longer fits in a single page.
	 */
	if (lopt_size > PAGE_SIZE)
		lopt = vzalloc(lopt_size);
	else
		lopt = kzalloc(lopt_size, GFP_KERNEL);
	if (lopt == NULL)
		return -ENOMEM;

	/* max_qlen_log is log2(nr_table_entries), with a floor of 3. */
	for (lopt->max_qlen_log = 3;
	     (1 << lopt->max_qlen_log) < nr_table_entries;
	     lopt->max_qlen_log++)
		;

	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	rwlock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;

	/* Publish the new listen_opt under syn_wait_lock. */
	write_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	write_unlock_bh(&queue->syn_wait_lock);

	return 0;
}
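
/*
 * Illustrative sketch only, kept out of the build with #if 0: a hypothetical
 * caller showing how the clamping above plays out.  With a listen(2) backlog
 * of 1000 and the default sysctl_max_syn_backlog of 256, reqsk_queue_alloc()
 * computes min(1000, 256) = 256, then max(256, 8) = 256, then
 * roundup_pow_of_two(257) = 512 hash buckets, and max_qlen_log ends up as 9.
 * example_listen_start() is a made-up name; the real caller is along the
 * lines of inet_csk_listen_start().
 */
#if 0
static int example_listen_start(struct request_sock_queue *queue, int backlog)
{
	int err;

	/* backlog is the value userspace passed to listen(2). */
	err = reqsk_queue_alloc(queue, backlog);
	if (err)
		return err;

	/* ... the real caller would go on to move the socket to TCP_LISTEN ... */
	return 0;
}
#endif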

void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
	struct listen_sock *lopt;
	size_t lopt_size;

	/*
	 * This is an error recovery path only;
	 * no locking is needed and lopt is not NULL.
	 */
	lopt = queue->listen_opt;
	lopt_size = sizeof(struct listen_sock) +
		lopt->nr_table_entries * sizeof(struct request_sock *);

	if (lopt_size > PAGE_SIZE)
		vfree(lopt);
	else
		kfree(lopt);
}

static inline struct listen_sock *reqsk_queue_yank_listen_sk(
		struct request_sock_queue *queue)
{
	struct listen_sock *lopt;

	write_lock_bh(&queue->syn_wait_lock);
	lopt = queue->listen_opt;
	queue->listen_opt = NULL;
	write_unlock_bh(&queue->syn_wait_lock);

	return lopt;
}

void reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* Make all the listen_opt local to us. */
	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
	size_t lopt_size = sizeof(struct listen_sock) +
		lopt->nr_table_entries * sizeof(struct request_sock *);

	if (lopt->qlen != 0) {
		unsigned int i;

		/* Walk every hash bucket and free any request_socks that
		 * are still pending.
		 */
		for (i = 0; i < lopt->nr_table_entries; i++) {
			struct request_sock *req;

			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				lopt->qlen--;
				reqsk_free(req);
			}
		}
	}

	WARN_ON(lopt->qlen != 0);
	if (lopt_size > PAGE_SIZE)
		vfree(lopt);
	else
		kfree(lopt);
}