/*
 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/addrconf.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
static unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);
static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);

static struct kmem_cache *rds_tcp_conn_slab;
static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *fpos);

static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;
static struct ctl_table rds_tcp_sysctl_table[] = {
#define RDS_TCP_SNDBUF	0
        {
                .procname = "rds_tcp_sndbuf",
                /* data is per-net pointer */
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = rds_tcp_skbuf_handler,
                .extra1 = &rds_tcp_min_sndbuf,
        },
#define RDS_TCP_RCVBUF	1
        {
                .procname = "rds_tcp_rcvbuf",
                /* data is per-net pointer */
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = rds_tcp_skbuf_handler,
                .extra1 = &rds_tcp_min_rcvbuf,
        },
        { }
};
/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
        int val = 1;

        kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (void *)&val,
                          sizeof(val));
}
u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
{
        /* seq# of the last byte of data in tcp send buffer */
        return tcp_sk(tc->t_sock->sk)->write_seq;
}

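/* seq# of the first byte of data not yet acked by the peer */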
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_una;
}

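/* Take @sock off rds_tcp_tc_list and put its original sk_* callbacks
 * back in place; done under sk_callback_lock so this cannot race with
 * the callbacks themselves.
 */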
void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc)
{
        rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_del_init(&tc->t_list_item);
        rds_tcp_tc_count--;
        spin_unlock(&rds_tcp_tc_list_lock);

        tc->t_sock = NULL;

        sock->sk->sk_write_space = tc->t_orig_write_space;
        sock->sk->sk_data_ready = tc->t_orig_data_ready;
        sock->sk->sk_state_change = tc->t_orig_state_change;
        sock->sk->sk_user_data = NULL;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * rds_tcp_reset_callbacks() switches the connection path over to the
 * new sock and returns the existing tc->t_sock.
 *
 * The only functions that set tc->t_sock are rds_tcp_set_callbacks
 * and rds_tcp_reset_callbacks.  Send and receive trust that
 * it is set.  The absence of RDS_CONN_UP bit protects those paths
 * from being called while it isn't set.
 */
void rds_tcp_reset_callbacks(struct socket *sock,
                             struct rds_conn_path *cp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;
        struct socket *osock = tc->t_sock;

        if (!osock)
                goto newsock;

        /* Need to resolve a duelling SYN between peers.
         * We have an outstanding SYN to this peer, which may
         * potentially have transitioned to the RDS_CONN_UP state,
         * so we must quiesce any send threads before resetting
         * cp_transport_data. We quiesce these threads by setting
         * cp_state to something other than RDS_CONN_UP, and then
         * waiting for any existing threads in rds_send_xmit to
         * complete release_in_xmit(). (Subsequent threads entering
         * rds_send_xmit() will bail on !rds_conn_up().)
         *
         * However an incoming syn-ack at this point would end up
         * marking the conn as RDS_CONN_UP, and would again permit
         * rds_send_xmit() threads through, so ideally we would
         * synchronize on RDS_CONN_UP after lock_sock(), but cannot
         * do that: waiting on !RDS_IN_XMIT after lock_sock() may
         * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
         * would not get set. As a result, we set c_state to
         * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
         * cannot mark rds_conn_path_up() in the window before lock_sock()
         */
        atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
        wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
        lock_sock(osock->sk);
        /* reset receive side state for rds_tcp_data_recv() for osock */
        cancel_delayed_work_sync(&cp->cp_send_w);
        cancel_delayed_work_sync(&cp->cp_recv_w);
        if (tc->t_tinc) {
                rds_inc_put(&tc->t_tinc->ti_inc);
                tc->t_tinc = NULL;
        }
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
        tc->t_tinc_data_rem = 0;
        rds_tcp_restore_callbacks(osock, tc);
        release_sock(osock->sk);
        sock_release(osock);
newsock:
        rds_send_path_reset(cp);
        lock_sock(sock->sk);
        rds_tcp_set_callbacks(sock, cp);
        release_sock(sock->sk);
}

/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
 * above rds_tcp_reset_callbacks for notes about synchronization
 * with data path
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;

        rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
        rds_tcp_tc_count++;
        spin_unlock(&rds_tcp_tc_list_lock);

        /* accepted sockets need our listen data ready undone */
        if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
                sock->sk->sk_data_ready = sock->sk->sk_user_data;

        tc->t_sock = sock;
        tc->t_cpath = cp;
        tc->t_orig_data_ready = sock->sk->sk_data_ready;
        tc->t_orig_write_space = sock->sk->sk_write_space;
        tc->t_orig_state_change = sock->sk->sk_state_change;

        sock->sk->sk_user_data = cp;
        sock->sk->sk_data_ready = rds_tcp_data_ready;
        sock->sk->sk_write_space = rds_tcp_write_space;
        sock->sk->sk_state_change = rds_tcp_state_change;
        write_unlock_bh(&sock->sk->sk_callback_lock);
}

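/* Fill in a struct rds_info_tcp_socket for each entry on
 * rds_tcp_tc_list and copy it out through the rds-info iterator.
 */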
static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
                            struct rds_info_iterator *iter,
                            struct rds_info_lengths *lens)
{
        struct rds_info_tcp_socket tsinfo;
        struct rds_tcp_connection *tc;
        unsigned long flags;
        struct sockaddr_in sin;
        struct socket *sock;

        spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

        if (len / sizeof(tsinfo) < rds_tcp_tc_count)
                goto out;

        list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
                sock = tc->t_sock;
                if (sock) {
                        sock->ops->getname(sock, (struct sockaddr *)&sin, 0);
                        tsinfo.local_addr = sin.sin_addr.s_addr;
                        tsinfo.local_port = sin.sin_port;
                        sock->ops->getname(sock, (struct sockaddr *)&sin, 1);
                        tsinfo.peer_addr = sin.sin_addr.s_addr;
                        tsinfo.peer_port = sin.sin_port;
                }

                tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
                tsinfo.data_rem = tc->t_tinc_data_rem;
                tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
                tsinfo.last_expected_una = tc->t_last_expected_una;
                tsinfo.last_seen_una = tc->t_last_seen_una;

                rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
        }

out:
        lens->nr = rds_tcp_tc_count;
        lens->each = sizeof(tsinfo);

        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}

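/* Check whether @addr is an address local to @net: IPv4-mapped
 * addresses are looked up in the IPv4 routing table, everything else
 * goes through ipv6_chk_addr(). A nonzero @scope_id must name a valid
 * interface in @net.
 */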
static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
                               __u32 scope_id)
{
        struct net_device *dev = NULL;
        int ret;

        if (ipv6_addr_v4mapped(addr)) {
                if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL)
                        return 0;
                return -EADDRNOTAVAIL;
        }

        /* If the scope_id is specified, check only those addresses
         * hosted on the specified interface.
         */
        if (scope_id != 0) {
                rcu_read_lock();
                dev = dev_get_by_index_rcu(net, scope_id);
                /* scope_id is not valid... */
                if (!dev) {
                        rcu_read_unlock();
                        return -EADDRNOTAVAIL;
                }
                rcu_read_unlock();
        }
        ret = ipv6_chk_addr(net, addr, dev, 0);
        if (ret)
                return 0;
        return -EADDRNOTAVAIL;
}

static void rds_tcp_conn_free(void *arg)
{
        struct rds_tcp_connection *tc = arg;
        unsigned long flags;

        rdsdebug("freeing tc %p\n", tc);

        spin_lock_irqsave(&rds_tcp_conn_lock, flags);
        if (!tc->t_tcp_node_detached)
                list_del(&tc->t_tcp_node);
        spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

        kmem_cache_free(rds_tcp_conn_slab, tc);
}

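/* Allocate one rds_tcp_connection per connection path and hook it up
 * as the path's cp_transport_data. Nothing goes on rds_tcp_conn_list
 * until every path has been allocated, so a partially constructed conn
 * is never visible to the list walkers; on failure, the paths
 * allocated so far are freed again.
 */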
static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_tcp_connection *tc;
        int i, j;
        int ret = 0;

        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
                if (!tc) {
                        ret = -ENOMEM;
                        goto fail;
                }
                mutex_init(&tc->t_conn_path_lock);
                tc->t_sock = NULL;
                tc->t_tinc = NULL;
                tc->t_tinc_hdr_rem = sizeof(struct rds_header);
                tc->t_tinc_data_rem = 0;

                conn->c_path[i].cp_transport_data = tc;
                tc->t_cpath = &conn->c_path[i];
                tc->t_tcp_node_detached = true;

                rdsdebug("rds_conn_path [%d] tc %p\n", i,
                         conn->c_path[i].cp_transport_data);
        }
        spin_lock_irq(&rds_tcp_conn_lock);
        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                tc = conn->c_path[i].cp_transport_data;
                tc->t_tcp_node_detached = false;
                list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
fail:
        if (ret) {
                for (j = 0; j < i; j++)
                        rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
        }
        return ret;
}

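/* Does @list already hold a path belonging to @conn? Each path has its
 * own t_tcp_node, so this keeps a multipath conn from being moved to a
 * reap list (and hence destroyed) more than once.
 */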
static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
{
        struct rds_tcp_connection *tc, *_tc;

        list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
                if (tc->t_cpath->cp_conn == conn)
                        return true;
        }

        return false;
}

static void rds_tcp_set_unloading(void)
{
        atomic_set(&rds_tcp_unloading, 1);
}

static bool rds_tcp_is_unloading(struct rds_connection *conn)
{
        return atomic_read(&rds_tcp_unloading) != 0;
}

static void rds_tcp_destroy_conns(void)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);

        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
                rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void rds_tcp_exit(void);

struct rds_transport rds_tcp_transport = {
        .laddr_check = rds_tcp_laddr_check,
        .xmit_path_prepare = rds_tcp_xmit_path_prepare,
        .xmit_path_complete = rds_tcp_xmit_path_complete,
        .xmit = rds_tcp_xmit,
        .recv_path = rds_tcp_recv_path,
        .conn_alloc = rds_tcp_conn_alloc,
        .conn_free = rds_tcp_conn_free,
        .conn_path_connect = rds_tcp_conn_path_connect,
        .conn_path_shutdown = rds_tcp_conn_path_shutdown,
        .inc_copy_to_user = rds_tcp_inc_copy_to_user,
        .inc_free = rds_tcp_inc_free,
        .stats_info_copy = rds_tcp_stats_info_copy,
        .exit = rds_tcp_exit,
        .t_owner = THIS_MODULE,
        .t_name = "tcp",
        .t_type = RDS_TRANS_TCP,
        .t_prefer_loopback = 1,
        .t_mp_capable = 1,
        .t_unloading = rds_tcp_is_unloading,
};

static unsigned int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
        struct socket *rds_tcp_listen_sock;
        struct work_struct rds_tcp_accept_w;
        struct ctl_table_header *rds_tcp_sysctl;
        struct ctl_table *ctl_table;
        int sndbuf_size;
        int rcvbuf_size;
};

/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation.
 */
void rds_tcp_tune(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_nonagle(sock);
        lock_sock(sk);
        if (rtn->sndbuf_size > 0) {
                sk->sk_sndbuf = rtn->sndbuf_size;
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rtn->rcvbuf_size > 0) {
                sk->sk_rcvbuf = rtn->rcvbuf_size;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
}

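/* rds_tcp_accept_work() queues the accept worker on rds_wq; the worker
 * then accepts pending connections one at a time until none are left.
 */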
static void rds_tcp_accept_worker(struct work_struct *work)
{
        struct rds_tcp_net *rtn = container_of(work,
                                               struct rds_tcp_net,
                                               rds_tcp_accept_w);

        while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
                cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

static __net_init int rds_tcp_init_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct ctl_table *tbl;
        int err = 0;

        memset(rtn, 0, sizeof(*rtn));

        /* {snd, rcv}buf_size default to 0, which implies we let the
         * stack pick the value, and permit auto-tuning of buffer size.
         */
        if (net == &init_net) {
                tbl = rds_tcp_sysctl_table;
        } else {
                tbl = kmemdup(rds_tcp_sysctl_table,
                              sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
                if (!tbl) {
                        pr_warn("could not allocate sysctl table\n");
                        return -ENOMEM;
                }
                rtn->ctl_table = tbl;
        }
        tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
        tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
        rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl);
        if (!rtn->rds_tcp_sysctl) {
                pr_warn("could not register sysctl\n");
                err = -ENOMEM;
                goto fail;
        }
        rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
        if (!rtn->rds_tcp_listen_sock) {
                pr_warn("could not set up listen sock\n");
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
                rtn->rds_tcp_sysctl = NULL;
                err = -EAFNOSUPPORT;
                goto fail;
        }
        INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
        return 0;

fail:
        if (net != &init_net)
                kfree(tbl);
        return err;
}

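/* Called on netns teardown: stop the listen socket for @net, then pull
 * every connection whose underlying socket lives in @net off
 * rds_tcp_conn_list and destroy it.
 */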
static void rds_tcp_kill_sock(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct socket *lsock = rtn->rds_tcp_listen_sock;

        rtn->rds_tcp_listen_sock = NULL;
        rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
                } else {
                        list_del(&tc->t_tcp_node);
                        tc->t_tcp_node_detached = true;
                }
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
                rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_kill_sock(net);

        if (rtn->rds_tcp_sysctl)
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);

        if (net != &init_net && rtn->ctl_table)
                kfree(rtn->ctl_table);
}

static struct pernet_operations rds_tcp_net_ops = {
        .init = rds_tcp_init_net,
        .exit = rds_tcp_exit_net,
        .id = &rds_tcp_netid,
        .size = sizeof(struct rds_tcp_net),
};

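/* Return the listen socket's original sk_data_ready callback (stashed
 * in sk_user_data by the listen-side setup; see the matching undo in
 * rds_tcp_set_callbacks()), or NULL if this netns no longer has a
 * listen socket.
 */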
void *rds_tcp_listen_sock_def_readable(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct socket *lsock = rtn->rds_tcp_listen_sock;

        if (!lsock)
                return NULL;

        return lsock->sk->sk_user_data;
}

/* when sysctl is used to modify some kernel socket parameters, this
 * function resets the RDS connections in that netns so that we can
 * restart with new parameters. The assumption is that such reset
 * events are few and far-between.
 */
static void rds_tcp_sysctl_reset(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;

        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;

                /* reconnect with new parameters */
                rds_conn_path_drop(tc->t_cpath, false);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
}

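/* Handler for the rds_tcp_sndbuf and rds_tcp_rcvbuf sysctls (exposed
 * under /proc/sys/net/rds/tcp/): validate the new value with
 * proc_dointvec_minmax() and, on a successful write, drop every
 * connection in this netns so it reconnects with the new buffer sizes.
 */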
static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *fpos)
{
        struct net *net = current->nsproxy->net_ns;
        int err;

        err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
        if (err < 0) {
                pr_warn("Invalid input. Must be >= %d\n",
                        *(int *)(ctl->extra1));
                return err;
        }

        if (write)
                rds_tcp_sysctl_reset(net);
        return 0;
}

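/* Module unload: flag the transport as unloading so connection
 * activity bails out early, then unwind the registrations made by
 * rds_tcp_init() and destroy any remaining connections.
 */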
static void rds_tcp_exit(void)
{
        rds_tcp_set_unloading();
        synchronize_rcu();
        rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
        unregister_pernet_device(&rds_tcp_net_ops);
        rds_tcp_destroy_conns();
        rds_trans_unregister(&rds_tcp_transport);
        rds_tcp_recv_exit();
        kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int rds_tcp_init(void)
{
        int ret;

        rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
                                              sizeof(struct rds_tcp_connection),
                                              0, 0, NULL);
        if (!rds_tcp_conn_slab) {
                ret = -ENOMEM;
                goto out;
        }

        ret = rds_tcp_recv_init();
        if (ret)
                goto out_slab;

        ret = register_pernet_device(&rds_tcp_net_ops);
        if (ret)
                goto out_recv;

        rds_trans_register(&rds_tcp_transport);

        rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

        goto out;
out_recv:
        rds_tcp_recv_exit();
out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
out:
        return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");