/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	__xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

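/*
 * Illustrative sketch (not part of this file): a transport module would
 * typically fill in a struct xprt_class and register it from its module
 * init hook, unregistering on exit. The names and the ident value below
 * are hypothetical.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,
 *		.setup	= example_xprt_setup,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */
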
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

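/*
 * Drop the transport send lock. If a close is pending, leave XPRT_LOCKED
 * set and hand the transport over to the autoclose worker instead of
 * waking another sender.
 */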
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

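/*
 * Callback for rpc_wake_up_first(): hand the write lock to the task being
 * woken. XPRT_LOCKED has already been set by __xprt_lock_write_next().
 */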
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

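/*
 * Congestion-controlled variants of the above: the write lock is handed
 * only to a task that carries no request or that can claim a congestion
 * window slot via __xprt_get_cong().
 */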
static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

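/*
 * Forget any partial transmit progress recorded for the task's request,
 * so that a later (re)transmission starts from the beginning of the
 * message.
 */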
static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

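/*
 * Release write access under the transport lock, via the transport's
 * release_xprt method.
 */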
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

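/*
 * Compute the jiffies deadline at which the current major timeout period
 * expires, from the client's timeout parameters.
 */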
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

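/*
 * Work item run by rpciod to close a transport that was flagged for
 * autoclose (XPRT_CLOSE_WAIT) and then hand the write lock back.
 */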
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

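/*
 * Idle timer callback: if no requests are waiting for replies and the
 * write lock is free, schedule the autoclose work item to shut the
 * transport down.
 */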
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

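/*
 * Pin the transport's write lock to an opaque cookie while a connect
 * operation is in progress; xprt_unlock_connect() below undoes this once
 * the connect handler is finished with the transport.
 */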
bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

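/*
 * RPC callback run when a connect attempt completes: update connection
 * statistics on success, let transient errors through for a retry, and
 * turn anything else into -EIO.
 */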
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

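/*
 * Feed the measured round-trip time for this request into the client's
 * RTT estimator. Only first transmissions are sampled, so retransmitted
 * requests don't skew the estimate (Karn's algorithm).
 */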
static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

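/*
 * RPC callback run when a request's timeout fires: if no reply has been
 * received, defer to the transport's timer method (if any); otherwise
 * clear the error so the reply can be processed normally.
 */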
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

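/**
 * xprt_end_transmit - release the transport after transmitting a request
 * @task: RPC task that recently completed
 *
 */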
void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}
	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

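/*
 * Backlog handling for tasks that could not get a request slot;
 * XPRT_CONGESTED stays set while any task is waiting on the queue.
 */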
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

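/*
 * Grow or shrink the slot table dynamically, staying within the
 * transport's min_reqs/max_reqs bounds.
 */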
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

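/*
 * Return a request slot to the free list, or free it outright if the slot
 * table is shrinking, then wake the next task waiting on the backlog.
 */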
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);
	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

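/*
 * XIDs match replies to their requests: start from a random value and
 * increment by one for each request slot initialized.
 */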
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

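/*
 * Set up a freshly allocated (or recycled) rpc_rqst before handing it to
 * the task that reserved it.
 */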
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

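/*
 * Common initialization for a newly allocated rpc_xprt, shared by all
 * transport implementations via xprt_alloc().
 */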
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);

	/* Exclude transport connect/disconnect handlers */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);