// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS vlserver probing
*
 * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"
#include "protocol_yfs.h"

/*
 * Handle the completion of a set of probes.
*/
static void afs_finished_vl_probe(struct afs_vlserver *server)
{
	if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {
		server->rtt = UINT_MAX;
		clear_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);
	}

	clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags);
	wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING);
}

/*
 * Handle the completion of a probe RPC call.
 */
static void afs_done_one_vl_probe(struct afs_vlserver *server, bool wake_up)
{
	if (atomic_dec_and_test(&server->probe_outstanding)) {
		afs_finished_vl_probe(server);
		wake_up = true;
	}

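	/* The caller passes wake_up=true when this probe produced a usable
	 * result; we also wake everyone once the last outstanding probe has
	 * completed (wake_up set above).
	 */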
	if (wake_up)
		wake_up_all(&server->probe_wq);
}

/*
 * Process the result of probing a vlserver.  This is called after successful
 * or failed delivery of a VL.GetCapabilities operation.
 */
void afs_vlserver_probe_result(struct afs_call *call)
{
	struct afs_addr_list *alist = call->vl_probe;
	struct afs_vlserver *server = call->vlserver;
	struct afs_address *addr = &alist->addrs[call->probe_index];
	unsigned int server_index = call->server_index;
	unsigned int rtt_us = 0;
	unsigned int index = call->probe_index;
	bool have_result = false;
	int ret = call->error;

	_enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code);

	spin_lock(&server->probe_lock);

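	/* Classify the outcome: 0 and -ECONNABORTED mean the server actually
	 * responded; resource and key errors are local failures that don't
	 * condemn the address; anything else marks this address as having
	 * failed the probe.
	 */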
	switch (ret) {
	case 0:
		server->probe.error = 0;
		goto responded;
	case -ECONNABORTED:
		if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {
			server->probe.abort_code = call->abort_code;
			server->probe.error = ret;
		}
		goto responded;
	case -ENOMEM:
	case -ENONET:
	case -EKEYEXPIRED:
	case -EKEYREVOKED:
	case -EKEYREJECTED:
		server->probe.flags |= AFS_VLSERVER_PROBE_LOCAL_FAILURE;
		if (server->probe.error == 0)
			server->probe.error = ret;
		trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail);
		goto out;
	case -ECONNRESET: /* Responded, but call expired. */
	case -ERFKILL:
	case -EADDRNOTAVAIL:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EHOSTDOWN:
	case -ECONNREFUSED:
	case -ETIMEDOUT:
	case -ETIME:
	default:
		clear_bit(index, &alist->responded);
		set_bit(index, &alist->probe_failed);
		if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) &&
		    (server->probe.error == 0 ||
		     server->probe.error == -ETIMEDOUT ||
		     server->probe.error == -ETIME))
			server->probe.error = ret;
		trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail);
		goto out;
	}

responded:
	set_bit(index, &alist->responded);
	clear_bit(index, &alist->probe_failed);

	if (call->service_id == YFS_VL_SERVICE) {
		server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS;
		set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
		server->service_id = call->service_id;
	} else {
		server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS;
		if (!(server->probe.flags & AFS_VLSERVER_PROBE_IS_YFS)) {
			clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
			server->service_id = call->service_id;
		}
	}

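	/* Prefer this address if its smoothed round-trip time beats the best
	 * seen so far in this probe run.
	 */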
	rtt_us = rxrpc_kernel_get_srtt(addr->peer);
	if (rtt_us < server->probe.rtt) {
		server->probe.rtt = rtt_us;
		server->rtt = rtt_us;
		alist->preferred = index;
	}

	smp_wmb(); /* Set rtt before responded. */
	server->probe.flags |= AFS_VLSERVER_PROBE_RESPONDED;
	set_bit(AFS_VLSERVER_FL_PROBED, &server->flags);
	set_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);
	have_result = true;
out:
	spin_unlock(&server->probe_lock);

	trace_afs_vl_probe(server, false, alist, index, call->error, call->abort_code, rtt_us);
_debug ( " probe [%u][%u] %pISpc rtt=%d ret=%d " ,
server_index , index , rxrpc_kernel_remote_addr ( addr - > peer ) ,
rtt_us , ret ) ;
	afs_done_one_vl_probe(server, have_result);
}

/*
 * Probe all of a vlserver's addresses to find out the best route and to
 * query its capabilities.
 */
static bool afs_do_probe_vlserver(struct afs_net *net,
				  struct afs_vlserver *server,
				  struct key *key,
				  unsigned int server_index,
				  struct afs_error *_e)
{
	struct afs_addr_list *alist;
	struct afs_call *call;
	unsigned long unprobed;
	unsigned int index, i;
	bool in_progress = false;
	int best_prio;

	_enter("%s", server->name);

	read_lock(&server->lock);
	alist = rcu_dereference_protected(server->addresses,
					  lockdep_is_held(&server->lock));
	afs_get_addrlist(alist, afs_alist_trace_get_vlprobe);
	read_unlock(&server->lock);

	atomic_set(&server->probe_outstanding, alist->nr_addrs);
	memset(&server->probe, 0, sizeof(server->probe));
	server->probe.rtt = UINT_MAX;

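	/* Fire off one probe per address, highest priority first, using a
	 * bitmap to track the addresses not yet tried.
	 */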
	unprobed = (1UL << alist->nr_addrs) - 1;
	while (unprobed) {
		best_prio = -1;
		index = 0;
		for (i = 0; i < alist->nr_addrs; i++) {
			if (test_bit(i, &unprobed) &&
			    alist->addrs[i].prio > best_prio) {
				index = i;
				best_prio = alist->addrs[i].prio;
			}
		}
		__clear_bit(index, &unprobed);

		trace_afs_vl_probe(server, true, alist, index, 0, 0, 0);
		call = afs_vl_get_capabilities(net, alist, index, key, server,
					       server_index);
		if (!IS_ERR(call)) {
			afs_prioritise_error(_e, call->error, call->abort_code);
			afs_put_call(call);
			in_progress = true;
		} else {
			afs_prioritise_error(_e, PTR_ERR(call), 0);
			afs_done_one_vl_probe(server, false);
		}
	}

	afs_put_addrlist(alist, afs_alist_trace_put_vlprobe);
	return in_progress;
}

/*
 * Send off probes to all unprobed servers.
 */
int afs_send_vl_probes(struct afs_net *net, struct key *key,
		       struct afs_vlserver_list *vllist)
{
	struct afs_vlserver *server;
	struct afs_error e = {};
	bool in_progress = false;
	int i;

	for (i = 0; i < vllist->nr_servers; i++) {
		server = vllist->servers[i].server;
		if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags))
			continue;

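		/* The PROBING bit acts as a lock: only the thread that wins
		 * test_and_set_bit_lock() sends the probes, and
		 * afs_finished_vl_probe() releases it when they complete.
		 */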
		if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags) &&
		    afs_do_probe_vlserver(net, server, key, i, &e))
			in_progress = true;
	}

	return in_progress ? 0 : e.error;
}

/*
 * Wait for the first as-yet untried server to respond.
 */
int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
			   unsigned long untried)
{
	struct wait_queue_entry *waits;
	struct afs_vlserver *server;
	unsigned int rtt = UINT_MAX, rtt_s;
	bool have_responders = false;
	int pref = -1, i;

	_enter("%u,%lx", vllist->nr_servers, untried);

	/* Only wait for servers that have a probe outstanding. */
	for (i = 0; i < vllist->nr_servers; i++) {
		if (test_bit(i, &untried)) {
			server = vllist->servers[i].server;
			if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
				__clear_bit(i, &untried);
			if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)
				have_responders = true;
		}
	}
	if (have_responders || !untried)
		return 0;

	waits = kmalloc(array_size(vllist->nr_servers, sizeof(*waits)), GFP_KERNEL);
	if (!waits)
		return -ENOMEM;

	for (i = 0; i < vllist->nr_servers; i++) {
		if (test_bit(i, &untried)) {
			server = vllist->servers[i].server;
			init_waitqueue_entry(&waits[i], current);
			add_wait_queue(&server->probe_wq, &waits[i]);
		}
	}

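	/* Sleep until one of the remaining servers responds, none of them is
	 * still probing, or a signal turns up.
	 */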
	for (;;) {
		bool still_probing = false;

		set_current_state(TASK_INTERRUPTIBLE);
		for (i = 0; i < vllist->nr_servers; i++) {
			if (test_bit(i, &untried)) {
				server = vllist->servers[i].server;
				if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)
					goto stop;
				if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
					still_probing = true;
			}
		}

		if (!still_probing || signal_pending(current))
			goto stop;
		schedule();
	}

stop:
	set_current_state(TASK_RUNNING);

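	/* Pick the responding server with the lowest recorded RTT as the
	 * preferred one and detach from all the wait queues.
	 */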
	for (i = 0; i < vllist->nr_servers; i++) {
		if (test_bit(i, &untried)) {
			server = vllist->servers[i].server;
			rtt_s = READ_ONCE(server->rtt);
			if (test_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags) &&
			    rtt_s < rtt) {
				pref = i;
				rtt = rtt_s;
			}
			remove_wait_queue(&server->probe_wq, &waits[i]);
		}
	}

	kfree(waits);

	if (pref == -1 && signal_pending(current))
		return -ERESTARTSYS;

	if (pref >= 0)
		vllist->preferred = pref;

	_leave(" = 0 [%u]", pref);
	return 0;
}