/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"
2005-04-17 02:20:36 +04:00
/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
2007-09-10 21:46:00 +04:00
/**
* xprt_register_transport - register a transport implementation
* @ transport : transport to register
*
* If a transport implementation is loaded as a kernel module , it can
* call this interface to make itself known to the RPC client .
*
* Returns :
* 0 : transport successfully registered
* - EEXIST : transport already registered
* - EINVAL : transport module being unloaded
*/
int xprt_register_transport ( struct xprt_class * transport )
{
struct xprt_class * t ;
int result ;
result = - EEXIST ;
spin_lock ( & xprt_list_lock ) ;
list_for_each_entry ( t , & xprt_list , list ) {
/* don't register the same transport class twice */
2007-09-10 21:47:57 +04:00
if ( t - > ident = = transport - > ident )
2007-09-10 21:46:00 +04:00
goto out ;
}
2008-07-31 09:53:56 +04:00
list_add_tail ( & transport - > list , & xprt_list ) ;
printk ( KERN_INFO " RPC: Registered %s transport module. \n " ,
transport - > name ) ;
result = 0 ;
2007-09-10 21:46:00 +04:00
out :
spin_unlock ( & xprt_list_lock ) ;
return result ;
}
EXPORT_SYMBOL_GPL ( xprt_register_transport ) ;
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
2009-03-11 21:37:56 +03:00
/**
* xprt_load_transport - load a transport implementation
* @ transport_name : transport to load
*
* Returns :
* 0 : transport successfully loaded
* - ENOENT : transport module not available
*/
int xprt_load_transport ( const char * transport_name )
{
struct xprt_class * t ;
int result ;
result = 0 ;
spin_lock ( & xprt_list_lock ) ;
list_for_each_entry ( t , & xprt_list , list ) {
if ( strcmp ( t - > name , transport_name ) = = 0 ) {
spin_unlock ( & xprt_list_lock ) ;
goto out ;
}
}
spin_unlock ( & xprt_list_lock ) ;
2010-05-25 01:33:05 +04:00
result = request_module ( " xprt%s " , transport_name ) ;
2009-03-11 21:37:56 +03:00
out :
return result ;
}
EXPORT_SYMBOL_GPL ( xprt_load_transport ) ;
2005-08-26 03:25:51 +04:00
/**
* xprt_reserve_xprt - serialize write access to transports
* @ task : task that is requesting access to the transport
*
* This prevents mixing the payload of separate requests , and prevents
* transport connects from colliding with writes . No congestion control
* is provided .
*/
int xprt_reserve_xprt ( struct rpc_task * task )
{
struct rpc_rqst * req = task - > tk_rqstp ;
2009-04-01 17:23:17 +04:00
struct rpc_xprt * xprt = req - > rq_xprt ;
2005-08-26 03:25:51 +04:00
if ( test_and_set_bit ( XPRT_LOCKED , & xprt - > state ) ) {
if ( task = = xprt - > snd_task )
return 1 ;
if ( task = = NULL )
return 0 ;
goto out_sleep ;
}
xprt - > snd_task = task ;
if ( req ) {
req - > rq_bytes_sent = 0 ;
req - > rq_ntrans + + ;
}
return 1 ;
out_sleep :
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: %5u failed to lock transport %p \n " ,
2005-08-26 03:25:51 +04:00
task - > tk_pid , xprt ) ;
task - > tk_timeout = 0 ;
task - > tk_status = - EAGAIN ;
if ( req & & req - > rq_ntrans )
2008-02-23 00:34:17 +03:00
rpc_sleep_on ( & xprt - > resend , task , NULL ) ;
2005-08-26 03:25:51 +04:00
else
2008-02-23 00:34:17 +03:00
rpc_sleep_on ( & xprt - > sending , task , NULL ) ;
2005-08-26 03:25:51 +04:00
return 0 ;
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_reserve_xprt ) ;
2005-08-26 03:25:51 +04:00
2006-01-03 11:55:55 +03:00
static void xprt_clear_locked ( struct rpc_xprt * xprt )
{
xprt - > snd_task = NULL ;
if ( ! test_bit ( XPRT_CLOSE_WAIT , & xprt - > state ) | | xprt - > shutdown ) {
smp_mb__before_clear_bit ( ) ;
clear_bit ( XPRT_LOCKED , & xprt - > state ) ;
smp_mb__after_clear_bit ( ) ;
} else
2007-06-15 02:00:42 +04:00
queue_work ( rpciod_workqueue , & xprt - > task_cleanup ) ;
2006-01-03 11:55:55 +03:00
}
2005-04-17 02:20:36 +04:00
/*
2005-08-26 03:25:51 +04:00
* xprt_reserve_xprt_cong - serialize write access to transports
* @ task : task that is requesting access to the transport
*
* Same as xprt_reserve_xprt , but Van Jacobson congestion control is
* integrated into the decision of whether a request is allowed to be
* woken up and given access to the transport .
2005-04-17 02:20:36 +04:00
*/
2005-08-26 03:25:51 +04:00
int xprt_reserve_xprt_cong ( struct rpc_task * task )
2005-04-17 02:20:36 +04:00
{
2005-08-26 03:25:51 +04:00
struct rpc_xprt * xprt = task - > tk_xprt ;
2005-04-17 02:20:36 +04:00
struct rpc_rqst * req = task - > tk_rqstp ;
2005-08-12 00:25:38 +04:00
if ( test_and_set_bit ( XPRT_LOCKED , & xprt - > state ) ) {
2005-04-17 02:20:36 +04:00
if ( task = = xprt - > snd_task )
return 1 ;
goto out_sleep ;
}
2005-08-26 03:25:51 +04:00
if ( __xprt_get_cong ( xprt , task ) ) {
2005-04-17 02:20:36 +04:00
xprt - > snd_task = task ;
if ( req ) {
req - > rq_bytes_sent = 0 ;
req - > rq_ntrans + + ;
}
return 1 ;
}
2006-01-03 11:55:55 +03:00
xprt_clear_locked ( xprt ) ;
2005-04-17 02:20:36 +04:00
out_sleep :
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: %5u failed to lock transport %p \n " , task - > tk_pid , xprt ) ;
2005-04-17 02:20:36 +04:00
task - > tk_timeout = 0 ;
task - > tk_status = - EAGAIN ;
if ( req & & req - > rq_ntrans )
2008-02-23 00:34:17 +03:00
rpc_sleep_on ( & xprt - > resend , task , NULL ) ;
2005-04-17 02:20:36 +04:00
else
2008-02-23 00:34:17 +03:00
rpc_sleep_on ( & xprt - > sending , task , NULL ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_reserve_xprt_cong ) ;
2005-04-17 02:20:36 +04:00
2005-08-26 03:25:51 +04:00
static inline int xprt_lock_write ( struct rpc_xprt * xprt , struct rpc_task * task )
2005-04-17 02:20:36 +04:00
{
int retval ;
2005-08-12 00:25:32 +04:00
spin_lock_bh ( & xprt - > transport_lock ) ;
2005-08-26 03:25:51 +04:00
retval = xprt - > ops - > reserve_xprt ( task ) ;
2005-08-12 00:25:32 +04:00
spin_unlock_bh ( & xprt - > transport_lock ) ;
2005-04-17 02:20:36 +04:00
return retval ;
}
2005-08-26 03:25:51 +04:00
static void __xprt_lock_write_next ( struct rpc_xprt * xprt )
2005-08-26 03:25:51 +04:00
{
struct rpc_task * task ;
struct rpc_rqst * req ;
if ( test_and_set_bit ( XPRT_LOCKED , & xprt - > state ) )
return ;
task = rpc_wake_up_next ( & xprt - > resend ) ;
if ( ! task ) {
task = rpc_wake_up_next ( & xprt - > sending ) ;
if ( ! task )
goto out_unlock ;
}
req = task - > tk_rqstp ;
xprt - > snd_task = task ;
if ( req ) {
req - > rq_bytes_sent = 0 ;
req - > rq_ntrans + + ;
}
return ;
out_unlock :
2006-01-03 11:55:55 +03:00
xprt_clear_locked ( xprt ) ;
2005-08-26 03:25:51 +04:00
}
static void __xprt_lock_write_next_cong ( struct rpc_xprt * xprt )
2005-04-17 02:20:36 +04:00
{
struct rpc_task * task ;
2005-08-12 00:25:38 +04:00
if ( test_and_set_bit ( XPRT_LOCKED , & xprt - > state ) )
2005-04-17 02:20:36 +04:00
return ;
2005-08-26 03:25:51 +04:00
if ( RPCXPRT_CONGESTED ( xprt ) )
2005-04-17 02:20:36 +04:00
goto out_unlock ;
task = rpc_wake_up_next ( & xprt - > resend ) ;
if ( ! task ) {
task = rpc_wake_up_next ( & xprt - > sending ) ;
if ( ! task )
goto out_unlock ;
}
2005-08-26 03:25:51 +04:00
if ( __xprt_get_cong ( xprt , task ) ) {
2005-04-17 02:20:36 +04:00
struct rpc_rqst * req = task - > tk_rqstp ;
xprt - > snd_task = task ;
if ( req ) {
req - > rq_bytes_sent = 0 ;
req - > rq_ntrans + + ;
}
return ;
}
out_unlock :
2006-01-03 11:55:55 +03:00
xprt_clear_locked ( xprt ) ;
2005-04-17 02:20:36 +04:00
}
2005-08-26 03:25:51 +04:00
/**
* xprt_release_xprt - allow other requests to use a transport
* @ xprt : transport with other tasks potentially waiting
* @ task : task that is releasing access to the transport
*
* Note that " task " can be NULL . No congestion control is provided .
2005-04-17 02:20:36 +04:00
*/
2005-08-26 03:25:51 +04:00
void xprt_release_xprt ( struct rpc_xprt * xprt , struct rpc_task * task )
2005-04-17 02:20:36 +04:00
{
if ( xprt - > snd_task = = task ) {
2006-01-03 11:55:55 +03:00
xprt_clear_locked ( xprt ) ;
2005-04-17 02:20:36 +04:00
__xprt_lock_write_next ( xprt ) ;
}
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_release_xprt ) ;
2005-04-17 02:20:36 +04:00
2005-08-26 03:25:51 +04:00
/**
* xprt_release_xprt_cong - allow other requests to use a transport
* @ xprt : transport with other tasks potentially waiting
* @ task : task that is releasing access to the transport
*
* Note that " task " can be NULL . Another task is awoken to use the
* transport if the transport ' s congestion window allows it .
*/
void xprt_release_xprt_cong ( struct rpc_xprt * xprt , struct rpc_task * task )
{
if ( xprt - > snd_task = = task ) {
2006-01-03 11:55:55 +03:00
xprt_clear_locked ( xprt ) ;
2005-08-26 03:25:51 +04:00
__xprt_lock_write_next_cong ( xprt ) ;
}
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_release_xprt_cong ) ;
2005-08-26 03:25:51 +04:00
static inline void xprt_release_write ( struct rpc_xprt * xprt , struct rpc_task * task )
2005-04-17 02:20:36 +04:00
{
2005-08-12 00:25:32 +04:00
spin_lock_bh ( & xprt - > transport_lock ) ;
2005-08-26 03:25:51 +04:00
xprt - > ops - > release_xprt ( xprt , task ) ;
2005-08-12 00:25:32 +04:00
spin_unlock_bh ( & xprt - > transport_lock ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Van Jacobson congestion avoidance . Check if the congestion window
* overflowed . Put the task to sleep if this is the case .
*/
static int
__xprt_get_cong ( struct rpc_xprt * xprt , struct rpc_task * task )
{
struct rpc_rqst * req = task - > tk_rqstp ;
if ( req - > rq_cong )
return 1 ;
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu \n " ,
2005-04-17 02:20:36 +04:00
task - > tk_pid , xprt - > cong , xprt - > cwnd ) ;
if ( RPCXPRT_CONGESTED ( xprt ) )
return 0 ;
req - > rq_cong = 1 ;
xprt - > cong + = RPC_CWNDSCALE ;
return 1 ;
}
/*
* Adjust the congestion window , and wake up the next task
* that has been sleeping due to congestion
*/
static void
__xprt_put_cong ( struct rpc_xprt * xprt , struct rpc_rqst * req )
{
if ( ! req - > rq_cong )
return ;
req - > rq_cong = 0 ;
xprt - > cong - = RPC_CWNDSCALE ;
2005-08-26 03:25:51 +04:00
__xprt_lock_write_next_cong ( xprt ) ;
2005-04-17 02:20:36 +04:00
}
2005-08-26 03:25:53 +04:00
/**
* xprt_release_rqst_cong - housekeeping when request is complete
* @ task : RPC request that recently completed
*
* Useful for transports that require congestion control .
*/
void xprt_release_rqst_cong ( struct rpc_task * task )
{
__xprt_put_cong ( task - > tk_xprt , task - > tk_rqstp ) ;
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_release_rqst_cong ) ;
2005-08-26 03:25:53 +04:00
2005-08-26 03:25:52 +04:00
/**
* xprt_adjust_cwnd - adjust transport congestion window
* @ task : recently completed RPC request used to adjust window
* @ result : result code of completed RPC request
*
2005-04-17 02:20:36 +04:00
* We use a time - smoothed congestion estimator to avoid heavy oscillation .
*/
2005-08-26 03:25:52 +04:00
void xprt_adjust_cwnd ( struct rpc_task * task , int result )
2005-04-17 02:20:36 +04:00
{
2005-08-26 03:25:52 +04:00
struct rpc_rqst * req = task - > tk_rqstp ;
struct rpc_xprt * xprt = task - > tk_xprt ;
unsigned long cwnd = xprt - > cwnd ;
2005-04-17 02:20:36 +04:00
if ( result > = 0 & & cwnd < = xprt - > cong ) {
/* The (cwnd >> 1) term makes sure
* the result gets rounded properly . */
cwnd + = ( RPC_CWNDSCALE * RPC_CWNDSCALE + ( cwnd > > 1 ) ) / cwnd ;
if ( cwnd > RPC_MAXCWND ( xprt ) )
cwnd = RPC_MAXCWND ( xprt ) ;
2005-08-26 03:25:51 +04:00
__xprt_lock_write_next_cong ( xprt ) ;
2005-04-17 02:20:36 +04:00
} else if ( result = = - ETIMEDOUT ) {
cwnd > > = 1 ;
if ( cwnd < RPC_CWNDSCALE )
cwnd = RPC_CWNDSCALE ;
}
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: cong %ld, cwnd was %ld, now %ld \n " ,
2005-04-17 02:20:36 +04:00
xprt - > cong , xprt - > cwnd , cwnd ) ;
xprt - > cwnd = cwnd ;
2005-08-26 03:25:52 +04:00
__xprt_put_cong ( xprt , req ) ;
2005-04-17 02:20:36 +04:00
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_adjust_cwnd ) ;
2005-04-17 02:20:36 +04:00
2005-08-12 00:25:44 +04:00
/**
* xprt_wake_pending_tasks - wake all tasks on a transport ' s pending queue
* @ xprt : transport with waiting tasks
* @ status : result code to plant in each task before waking it
*
*/
void xprt_wake_pending_tasks ( struct rpc_xprt * xprt , int status )
{
if ( status < 0 )
rpc_wake_up_status ( & xprt - > pending , status ) ;
else
rpc_wake_up ( & xprt - > pending ) ;
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_wake_pending_tasks ) ;
2005-08-12 00:25:44 +04:00
2005-08-12 00:25:50 +04:00
/**
* xprt_wait_for_buffer_space - wait for transport output buffer to clear
* @ task : task to be put to sleep
2008-04-27 09:59:02 +04:00
* @ action : function pointer to be executed after wait
2005-08-12 00:25:50 +04:00
*/
2008-04-18 02:52:19 +04:00
void xprt_wait_for_buffer_space ( struct rpc_task * task , rpc_action action )
2005-08-12 00:25:50 +04:00
{
struct rpc_rqst * req = task - > tk_rqstp ;
struct rpc_xprt * xprt = req - > rq_xprt ;
task - > tk_timeout = req - > rq_timeout ;
2008-04-18 02:52:19 +04:00
rpc_sleep_on ( & xprt - > pending , task , action ) ;
2005-08-12 00:25:50 +04:00
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_wait_for_buffer_space ) ;
2005-08-12 00:25:50 +04:00
/**
* xprt_write_space - wake the task waiting for transport output buffer space
* @ xprt : transport with waiting tasks
*
* Can be called in a soft IRQ context , so xprt_write_space never sleeps .
*/
void xprt_write_space ( struct rpc_xprt * xprt )
{
if ( unlikely ( xprt - > shutdown ) )
return ;
spin_lock_bh ( & xprt - > transport_lock ) ;
if ( xprt - > snd_task ) {
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: write space: waking waiting task on "
" xprt %p \n " , xprt ) ;
2008-02-23 00:34:12 +03:00
rpc_wake_up_queued_task ( & xprt - > pending , xprt - > snd_task ) ;
2005-08-12 00:25:50 +04:00
}
spin_unlock_bh ( & xprt - > transport_lock ) ;
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_write_space ) ;
2005-08-12 00:25:50 +04:00
2005-08-26 03:25:50 +04:00
/**
* xprt_set_retrans_timeout_def - set a request ' s retransmit timeout
* @ task : task whose timeout is to be set
*
* Set a request ' s retransmit timeout based on the transport ' s
* default timeout parameters . Used by transports that don ' t adjust
* the retransmit timeout based on round - trip time estimation .
*/
void xprt_set_retrans_timeout_def ( struct rpc_task * task )
{
task - > tk_timeout = task - > tk_rqstp - > rq_timeout ;
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_set_retrans_timeout_def ) ;
2005-08-26 03:25:50 +04:00
/*
* xprt_set_retrans_timeout_rtt - set a request ' s retransmit timeout
* @ task : task whose timeout is to be set
2007-02-10 02:38:13 +03:00
*
2005-08-26 03:25:50 +04:00
* Set a request ' s retransmit timeout using the RTT estimator .
*/
void xprt_set_retrans_timeout_rtt ( struct rpc_task * task )
{
int timer = task - > tk_msg . rpc_proc - > p_timer ;
2007-12-21 00:03:55 +03:00
struct rpc_clnt * clnt = task - > tk_client ;
struct rpc_rtt * rtt = clnt - > cl_rtt ;
2005-08-26 03:25:50 +04:00
struct rpc_rqst * req = task - > tk_rqstp ;
2007-12-21 00:03:55 +03:00
unsigned long max_timeout = clnt - > cl_timeout - > to_maxval ;
2005-08-26 03:25:50 +04:00
task - > tk_timeout = rpc_calc_rto ( rtt , timer ) ;
task - > tk_timeout < < = rpc_ntimeo ( rtt , timer ) + req - > rq_retries ;
if ( task - > tk_timeout > max_timeout | | task - > tk_timeout = = 0 )
task - > tk_timeout = max_timeout ;
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_set_retrans_timeout_rtt ) ;
2005-08-26 03:25:50 +04:00
2005-04-17 02:20:36 +04:00
static void xprt_reset_majortimeo ( struct rpc_rqst * req )
{
2007-12-21 00:03:55 +03:00
const struct rpc_timeout * to = req - > rq_task - > tk_client - > cl_timeout ;
2005-04-17 02:20:36 +04:00
req - > rq_majortimeo = req - > rq_timeout ;
if ( to - > to_exponential )
req - > rq_majortimeo < < = to - > to_retries ;
else
req - > rq_majortimeo + = to - > to_increment * to - > to_retries ;
if ( req - > rq_majortimeo > to - > to_maxval | | req - > rq_majortimeo = = 0 )
req - > rq_majortimeo = to - > to_maxval ;
req - > rq_majortimeo + = jiffies ;
}
2005-08-12 00:25:26 +04:00
/**
* xprt_adjust_timeout - adjust timeout values for next retransmit
* @ req : RPC request containing parameters to use for the adjustment
*
2005-04-17 02:20:36 +04:00
*/
int xprt_adjust_timeout ( struct rpc_rqst * req )
{
struct rpc_xprt * xprt = req - > rq_xprt ;
2007-12-21 00:03:55 +03:00
const struct rpc_timeout * to = req - > rq_task - > tk_client - > cl_timeout ;
2005-04-17 02:20:36 +04:00
int status = 0 ;
if ( time_before ( jiffies , req - > rq_majortimeo ) ) {
if ( to - > to_exponential )
req - > rq_timeout < < = 1 ;
else
req - > rq_timeout + = to - > to_increment ;
if ( to - > to_maxval & & req - > rq_timeout > = to - > to_maxval )
req - > rq_timeout = to - > to_maxval ;
req - > rq_retries + + ;
} else {
req - > rq_timeout = to - > to_initval ;
req - > rq_retries = 0 ;
xprt_reset_majortimeo ( req ) ;
/* Reset the RTT counters == "slow start" */
2005-08-12 00:25:32 +04:00
spin_lock_bh ( & xprt - > transport_lock ) ;
2005-04-17 02:20:36 +04:00
rpc_init_rtt ( req - > rq_task - > tk_client - > cl_rtt , to - > to_initval ) ;
2005-08-12 00:25:32 +04:00
spin_unlock_bh ( & xprt - > transport_lock ) ;
2005-04-17 02:20:36 +04:00
status = - ETIMEDOUT ;
}
if ( req - > rq_timeout = = 0 ) {
printk ( KERN_WARNING " xprt_adjust_timeout: rq_timeout = 0! \n " ) ;
req - > rq_timeout = 5 * HZ ;
}
return status ;
}
2006-11-22 17:55:48 +03:00
static void xprt_autoclose ( struct work_struct * work )
2005-04-17 02:20:36 +04:00
{
2006-11-22 17:55:48 +03:00
struct rpc_xprt * xprt =
container_of ( work , struct rpc_xprt , task_cleanup ) ;
2005-04-17 02:20:36 +04:00
2005-08-12 00:25:23 +04:00
xprt - > ops - > close ( xprt ) ;
2007-11-06 18:18:36 +03:00
clear_bit ( XPRT_CLOSE_WAIT , & xprt - > state ) ;
2005-04-17 02:20:36 +04:00
xprt_release_write ( xprt , NULL ) ;
}
2005-08-12 00:25:26 +04:00
/**
2007-11-07 02:44:20 +03:00
* xprt_disconnect_done - mark a transport as disconnected
2005-08-12 00:25:26 +04:00
* @ xprt : transport to flag for disconnect
*
2005-04-17 02:20:36 +04:00
*/
2007-11-07 02:44:20 +03:00
void xprt_disconnect_done ( struct rpc_xprt * xprt )
2005-04-17 02:20:36 +04:00
{
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: disconnected transport %p \n " , xprt ) ;
2005-08-12 00:25:32 +04:00
spin_lock_bh ( & xprt - > transport_lock ) ;
2005-04-17 02:20:36 +04:00
xprt_clear_connected ( xprt ) ;
2009-03-11 21:38:00 +03:00
xprt_wake_pending_tasks ( xprt , - EAGAIN ) ;
2005-08-12 00:25:32 +04:00
spin_unlock_bh ( & xprt - > transport_lock ) ;
2005-04-17 02:20:36 +04:00
}
2007-11-07 02:44:20 +03:00
EXPORT_SYMBOL_GPL ( xprt_disconnect_done ) ;
2005-04-17 02:20:36 +04:00
2007-11-06 18:18:36 +03:00
/**
* xprt_force_disconnect - force a transport to disconnect
* @ xprt : transport to disconnect
*
*/
void xprt_force_disconnect ( struct rpc_xprt * xprt )
{
/* Don't race with the test_bit() in xprt_clear_locked() */
spin_lock_bh ( & xprt - > transport_lock ) ;
set_bit ( XPRT_CLOSE_WAIT , & xprt - > state ) ;
/* Try to schedule an autoclose RPC call */
if ( test_and_set_bit ( XPRT_LOCKED , & xprt - > state ) = = 0 )
queue_work ( rpciod_workqueue , & xprt - > task_cleanup ) ;
2009-03-11 21:38:00 +03:00
xprt_wake_pending_tasks ( xprt , - EAGAIN ) ;
2007-11-06 18:18:36 +03:00
spin_unlock_bh ( & xprt - > transport_lock ) ;
}
2008-04-18 00:52:57 +04:00
/**
* xprt_conditional_disconnect - force a transport to disconnect
* @ xprt : transport to disconnect
* @ cookie : ' connection cookie '
*
* This attempts to break the connection if and only if ' cookie ' matches
* the current transport ' connection cookie ' . It ensures that we don ' t
* try to break the connection more than once when we need to retransmit
* a batch of RPC requests .
*
*/
void xprt_conditional_disconnect ( struct rpc_xprt * xprt , unsigned int cookie )
{
/* Don't race with the test_bit() in xprt_clear_locked() */
spin_lock_bh ( & xprt - > transport_lock ) ;
if ( cookie ! = xprt - > connect_cookie )
goto out ;
if ( test_bit ( XPRT_CLOSING , & xprt - > state ) | | ! xprt_connected ( xprt ) )
goto out ;
set_bit ( XPRT_CLOSE_WAIT , & xprt - > state ) ;
/* Try to schedule an autoclose RPC call */
if ( test_and_set_bit ( XPRT_LOCKED , & xprt - > state ) = = 0 )
queue_work ( rpciod_workqueue , & xprt - > task_cleanup ) ;
2009-03-11 21:38:00 +03:00
xprt_wake_pending_tasks ( xprt , - EAGAIN ) ;
2008-04-18 00:52:57 +04:00
out :
spin_unlock_bh ( & xprt - > transport_lock ) ;
}
2005-04-17 02:20:36 +04:00
static void
xprt_init_autodisconnect ( unsigned long data )
{
struct rpc_xprt * xprt = ( struct rpc_xprt * ) data ;
2005-08-12 00:25:32 +04:00
spin_lock ( & xprt - > transport_lock ) ;
2005-04-17 02:20:36 +04:00
if ( ! list_empty ( & xprt - > recv ) | | xprt - > shutdown )
goto out_abort ;
2005-08-12 00:25:38 +04:00
if ( test_and_set_bit ( XPRT_LOCKED , & xprt - > state ) )
2005-04-17 02:20:36 +04:00
goto out_abort ;
2005-08-12 00:25:32 +04:00
spin_unlock ( & xprt - > transport_lock ) ;
2009-04-22 01:18:20 +04:00
set_bit ( XPRT_CONNECTION_CLOSE , & xprt - > state ) ;
queue_work ( rpciod_workqueue , & xprt - > task_cleanup ) ;
2005-04-17 02:20:36 +04:00
return ;
out_abort :
2005-08-12 00:25:32 +04:00
spin_unlock ( & xprt - > transport_lock ) ;
2005-04-17 02:20:36 +04:00
}
2005-08-12 00:25:26 +04:00
/**
* xprt_connect - schedule a transport connect operation
* @ task : RPC task that is requesting the connect
2005-04-17 02:20:36 +04:00
*
*/
void xprt_connect ( struct rpc_task * task )
{
struct rpc_xprt * xprt = task - > tk_xprt ;
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: %5u xprt_connect xprt %p %s connected \n " , task - > tk_pid ,
2005-04-17 02:20:36 +04:00
xprt , ( xprt_connected ( xprt ) ? " is " : " is not " ) ) ;
2006-08-23 04:06:15 +04:00
if ( ! xprt_bound ( xprt ) ) {
2009-03-11 21:09:39 +03:00
task - > tk_status = - EAGAIN ;
2005-04-17 02:20:36 +04:00
return ;
}
if ( ! xprt_lock_write ( xprt , task ) )
return ;
2009-12-03 16:10:17 +03:00
if ( test_and_clear_bit ( XPRT_CLOSE_WAIT , & xprt - > state ) )
xprt - > ops - > close ( xprt ) ;
2005-04-17 02:20:36 +04:00
if ( xprt_connected ( xprt ) )
2005-08-12 00:25:23 +04:00
xprt_release_write ( xprt , task ) ;
else {
if ( task - > tk_rqstp )
task - > tk_rqstp - > rq_bytes_sent = 0 ;
2005-04-17 02:20:36 +04:00
2010-04-17 00:42:12 +04:00
task - > tk_timeout = task - > tk_rqstp - > rq_timeout ;
2008-02-23 00:34:17 +03:00
rpc_sleep_on ( & xprt - > pending , task , xprt_connect_status ) ;
2010-04-17 00:41:57 +04:00
if ( test_bit ( XPRT_CLOSING , & xprt - > state ) )
return ;
if ( xprt_test_and_set_connecting ( xprt ) )
return ;
2006-03-20 21:44:16 +03:00
xprt - > stat . connect_start = jiffies ;
2005-08-12 00:25:23 +04:00
xprt - > ops - > connect ( task ) ;
2005-04-17 02:20:36 +04:00
}
}
2005-08-12 00:25:26 +04:00
static void xprt_connect_status ( struct rpc_task * task )
2005-04-17 02:20:36 +04:00
{
struct rpc_xprt * xprt = task - > tk_xprt ;
2008-06-12 01:56:13 +04:00
if ( task - > tk_status = = 0 ) {
2006-03-20 21:44:16 +03:00
xprt - > stat . connect_count + + ;
xprt - > stat . connect_time + = ( long ) jiffies - xprt - > stat . connect_start ;
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: %5u xprt_connect_status: connection established \n " ,
2005-04-17 02:20:36 +04:00
task - > tk_pid ) ;
return ;
}
switch ( task - > tk_status ) {
2009-03-11 21:38:00 +03:00
case - EAGAIN :
dprintk ( " RPC: %5u xprt_connect_status: retrying \n " , task - > tk_pid ) ;
2005-08-12 00:25:08 +04:00
break ;
2005-04-17 02:20:36 +04:00
case - ETIMEDOUT :
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: %5u xprt_connect_status: connect attempt timed "
" out \n " , task - > tk_pid ) ;
2005-04-17 02:20:36 +04:00
break ;
default :
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: %5u xprt_connect_status: error %d connecting to "
" server %s \n " , task - > tk_pid , - task - > tk_status ,
task - > tk_client - > cl_server ) ;
2005-08-12 00:25:08 +04:00
xprt_release_write ( xprt , task ) ;
task - > tk_status = - EIO ;
2005-04-17 02:20:36 +04:00
}
}
2005-08-12 00:25:26 +04:00
/**
* xprt_lookup_rqst - find an RPC request corresponding to an XID
* @ xprt : transport on which the original request was transmitted
* @ xid : RPC XID of incoming reply
*
2005-04-17 02:20:36 +04:00
*/
2006-09-27 09:29:38 +04:00
struct rpc_rqst * xprt_lookup_rqst ( struct rpc_xprt * xprt , __be32 xid )
2005-04-17 02:20:36 +04:00
{
struct list_head * pos ;
list_for_each ( pos , & xprt - > recv ) {
struct rpc_rqst * entry = list_entry ( pos , struct rpc_rqst , rq_list ) ;
2006-03-20 21:44:16 +03:00
if ( entry - > rq_xid = = xid )
return entry ;
2005-04-17 02:20:36 +04:00
}
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: xprt_lookup_rqst did not find xid %08x \n " ,
ntohl ( xid ) ) ;
2006-03-20 21:44:16 +03:00
xprt - > stat . bad_xids + + ;
return NULL ;
2005-04-17 02:20:36 +04:00
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_lookup_rqst ) ;
2005-04-17 02:20:36 +04:00
2010-05-07 21:34:27 +04:00
static void xprt_update_rtt ( struct rpc_task * task )
2005-08-26 03:25:52 +04:00
{
struct rpc_rqst * req = task - > tk_rqstp ;
struct rpc_rtt * rtt = task - > tk_client - > cl_rtt ;
unsigned timer = task - > tk_msg . rpc_proc - > p_timer ;
2010-05-13 20:51:49 +04:00
long m = usecs_to_jiffies ( ktime_to_us ( req - > rq_rtt ) ) ;
2005-08-26 03:25:52 +04:00
if ( timer ) {
if ( req - > rq_ntrans = = 1 )
2010-05-07 21:34:47 +04:00
rpc_update_rtt ( rtt , timer , m ) ;
2005-08-26 03:25:52 +04:00
rpc_set_timeo ( rtt , timer , req - > rq_ntrans - 1 ) ;
}
}
2005-08-12 00:25:26 +04:00
/**
* xprt_complete_rqst - called when reply processing is complete
2005-08-26 03:25:52 +04:00
* @ task : RPC request that recently completed
2005-08-12 00:25:26 +04:00
* @ copied : actual number of bytes received from the transport
*
2005-08-26 03:25:52 +04:00
* Caller holds transport lock .
2005-04-17 02:20:36 +04:00
*/
2005-08-26 03:25:52 +04:00
void xprt_complete_rqst ( struct rpc_task * task , int copied )
2005-04-17 02:20:36 +04:00
{
2005-08-26 03:25:52 +04:00
struct rpc_rqst * req = task - > tk_rqstp ;
2008-02-23 00:34:12 +03:00
struct rpc_xprt * xprt = req - > rq_xprt ;
2005-04-17 02:20:36 +04:00
2005-08-26 03:25:52 +04:00
dprintk ( " RPC: %5u xid %08x complete (%d bytes received) \n " ,
task - > tk_pid , ntohl ( req - > rq_xid ) , copied ) ;
2005-04-17 02:20:36 +04:00
2008-02-23 00:34:12 +03:00
xprt - > stat . recvs + + ;
2010-05-13 20:51:49 +04:00
req - > rq_rtt = ktime_sub ( ktime_get ( ) , req - > rq_xtime ) ;
2010-05-07 21:34:27 +04:00
if ( xprt - > ops - > timer ! = NULL )
xprt_update_rtt ( task ) ;
2006-03-20 21:44:17 +03:00
2005-04-17 02:20:36 +04:00
list_del_init ( & req - > rq_list ) ;
2008-03-21 23:19:41 +03:00
req - > rq_private_buf . len = copied ;
2009-04-01 17:23:28 +04:00
/* Ensure all writes are done before we update */
/* req->rq_reply_bytes_recvd */
2006-03-20 21:44:51 +03:00
smp_wmb ( ) ;
2009-04-01 17:23:28 +04:00
req - > rq_reply_bytes_recvd = copied ;
2008-02-23 00:34:12 +03:00
rpc_wake_up_queued_task ( & xprt - > pending , task ) ;
2005-04-17 02:20:36 +04:00
}
2007-09-10 21:45:36 +04:00
EXPORT_SYMBOL_GPL ( xprt_complete_rqst ) ;
2005-04-17 02:20:36 +04:00
2005-08-26 03:25:52 +04:00
static void xprt_timer ( struct rpc_task * task )
2005-04-17 02:20:36 +04:00
{
2005-08-26 03:25:52 +04:00
struct rpc_rqst * req = task - > tk_rqstp ;
2005-04-17 02:20:36 +04:00
struct rpc_xprt * xprt = req - > rq_xprt ;
2008-02-23 00:34:17 +03:00
if ( task - > tk_status ! = - ETIMEDOUT )
return ;
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: %5u xprt_timer \n " , task - > tk_pid ) ;
2005-04-17 02:20:36 +04:00
2008-02-23 00:34:17 +03:00
spin_lock_bh ( & xprt - > transport_lock ) ;
2009-04-01 17:23:28 +04:00
if ( ! req - > rq_reply_bytes_recvd ) {
2005-08-26 03:25:52 +04:00
if ( xprt - > ops - > timer )
xprt - > ops - > timer ( task ) ;
2008-02-23 00:34:17 +03:00
} else
task - > tk_status = 0 ;
spin_unlock_bh ( & xprt - > transport_lock ) ;
2005-04-17 02:20:36 +04:00
}
2009-09-10 18:32:28 +04:00
static inline int xprt_has_timer ( struct rpc_xprt * xprt )
{
return xprt - > idle_timeout ! = 0 ;
}
2005-08-12 00:25:26 +04:00
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 * Returns 0 when the transport's write access has been reserved,
 * -EAGAIN when the caller must back off and retry, or a positive
 * byte count when the reply already arrived and nothing remains to
 * be (re)transmitted.
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	/* Reply already received and no partial send in flight:
	 * there is nothing to transmit, report the bytes received. */
	if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
		err = req->rq_reply_bytes_recvd;
		goto out_unlock;
	}
	/* Claim exclusive write access to the transport */
	if (!xprt->ops->reserve_xprt(task))
		err = -EAGAIN;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}
2006-07-28 01:22:50 +04:00
void xprt_end_transmit ( struct rpc_task * task )
2005-10-19 01:20:11 +04:00
{
2009-04-01 17:23:17 +04:00
xprt_release_write ( task - > tk_rqstp - > rq_xprt , task ) ;
2005-10-19 01:20:11 +04:00
}
2005-08-12 00:25:26 +04:00
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		/* Reply already in and previous send fully completed:
		 * nothing left to transmit. */
		return;

	/* Stamp the connection generation and send time, then hand the
	 * request to the transport implementation. */
	req->rq_connect_cookie = xprt->connect_cookie;
	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
2010-04-17 00:37:01 +04:00
static void xprt_alloc_slot ( struct rpc_task * task )
2005-04-17 02:20:36 +04:00
{
struct rpc_xprt * xprt = task - > tk_xprt ;
task - > tk_status = 0 ;
if ( task - > tk_rqstp )
return ;
if ( ! list_empty ( & xprt - > free ) ) {
struct rpc_rqst * req = list_entry ( xprt - > free . next , struct rpc_rqst , rq_list ) ;
list_del_init ( & req - > rq_list ) ;
task - > tk_rqstp = req ;
xprt_request_init ( task , xprt ) ;
return ;
}
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: waiting for request slot \n " ) ;
2005-04-17 02:20:36 +04:00
task - > tk_status = - EAGAIN ;
task - > tk_timeout = 0 ;
2008-02-23 00:34:17 +03:00
rpc_sleep_on ( & xprt - > backlog , task , NULL ) ;
2005-04-17 02:20:36 +04:00
}
2010-04-17 00:37:01 +04:00
static void xprt_free_slot ( struct rpc_xprt * xprt , struct rpc_rqst * req )
{
memset ( req , 0 , sizeof ( * req ) ) ; /* mark unused */
spin_lock ( & xprt - > reserve_lock ) ;
list_add ( & req - > rq_list , & xprt - > free ) ;
rpc_wake_up_next ( & xprt - > backlog ) ;
spin_unlock ( & xprt - > reserve_lock ) ;
}
2005-08-12 00:25:26 +04:00
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	/* Default to failure; xprt_alloc_slot() overwrites tk_status */
	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	xprt_alloc_slot(task);
	spin_unlock(&xprt->reserve_lock);
}
2006-09-27 09:29:38 +04:00
static inline __be32 xprt_alloc_xid ( struct rpc_xprt * xprt )
2005-04-17 02:20:36 +04:00
{
2010-04-21 06:06:52 +04:00
return ( __force __be32 ) xprt - > xid + + ;
2005-04-17 02:20:36 +04:00
}
/* Seed the XID sequence randomly so XIDs differ across transports/boots */
static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}
2005-08-12 00:25:26 +04:00
static void xprt_request_init ( struct rpc_task * task , struct rpc_xprt * xprt )
2005-04-17 02:20:36 +04:00
{
struct rpc_rqst * req = task - > tk_rqstp ;
2007-12-21 00:03:55 +03:00
req - > rq_timeout = task - > tk_client - > cl_timeout - > to_initval ;
2005-04-17 02:20:36 +04:00
req - > rq_task = task ;
req - > rq_xprt = xprt ;
2006-01-03 11:55:49 +03:00
req - > rq_buffer = NULL ;
2005-04-17 02:20:36 +04:00
req - > rq_xid = xprt_alloc_xid ( xprt ) ;
2005-10-14 00:54:43 +04:00
req - > rq_release_snd_buf = NULL ;
2006-08-31 23:44:52 +04:00
xprt_reset_majortimeo ( req ) ;
2007-01-31 20:14:08 +03:00
dprintk ( " RPC: %5u reserved req %p xid %08x \n " , task - > tk_pid ,
2005-04-17 02:20:36 +04:00
req , ntohl ( req - > rq_xid ) ) ;
}
2005-08-12 00:25:26 +04:00
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;

	if (!(req = task->tk_rqstp))
		return;

	xprt = req->rq_xprt;
	rpc_count_iostats(task);

	spin_lock_bh(&xprt->transport_lock);
	/* Give back write access and let the transport drop any
	 * per-request state it keeps. */
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	/* Unhook from the receive list if a reply was still expected */
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	/* Re-arm the autodisconnect timer once no replies are pending */
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);

	/* Free per-request resources outside the transport lock */
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	/* Backchannel-preallocated requests go back to their own pool */
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}
2006-08-23 04:06:20 +04:00
/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 * Returns the new transport, or an ERR_PTR on failure.
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	struct xprt_class *t;

	/* Find the registered transport class matching the identifier */
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	/* Let the transport class allocate and configure the xprt */
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		return xprt;
	}

	/* Generic initialization common to all transport classes */
	kref_init(&xprt->kref);
	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_NFS_V4_1)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_NFS_V4_1 */

	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	/* Only arm the autodisconnect callback for transports that
	 * declared an idle timeout. */
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
				(unsigned long)xprt);
	else
		init_timer(&xprt->timer);
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs - 1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);
	xprt_init_xid(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
	return xprt;
}
2005-08-12 00:25:26 +04:00
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 *
 * Runs when the last reference is dropped via xprt_put().
 */
static void xprt_destroy(struct kref *kref)
{
	struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

	dprintk("RPC: destroying transport %p\n", xprt);
	/* Flag shutdown and wait for a concurrently running
	 * autodisconnect timer to finish before freeing state. */
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->resend);
	rpc_destroy_wait_queue(&xprt->backlog);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
2005-04-17 02:20:36 +04:00
2006-09-05 20:55:57 +04:00
/**
* xprt_put - release a reference to an RPC transport .
* @ xprt : pointer to the transport
*
*/
void xprt_put ( struct rpc_xprt * xprt )
{
kref_put ( & xprt - > kref , xprt_destroy ) ;
}
/**
* xprt_get - return a reference to an RPC transport .
* @ xprt : pointer to the transport
*
*/
struct rpc_xprt * xprt_get ( struct rpc_xprt * xprt )
{
kref_get ( & xprt - > kref ) ;
return xprt ;
2005-04-17 02:20:36 +04:00
}