/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"
/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.  We
   suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */
/*
 * Re-enable normal locking for this lockspace, but only if the recovery
 * sequence "seq" is still the current one (ls_recover_seq).  Returns 0 on
 * success, -EINTR if a newer stop superseded this start.
 */
static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
	int rv = -EINTR;

	/* keep dlm_recv out while we flip the RUNNING state */
	down_write(&ls->ls_recv_active);

	spin_lock(&ls->ls_recover_lock);
	if (ls->ls_recover_seq == seq) {
		set_bit(LSFL_RUNNING, &ls->ls_flags);
		/* unblocks processes waiting to enter the dlm */
		up_write(&ls->ls_in_recovery);
		rv = 0;
	}
	spin_unlock(&ls->ls_recover_lock);

	up_write(&ls->ls_recv_active);
	return rv;
}
static int ls_recover ( struct dlm_ls * ls , struct dlm_recover * rv )
{
unsigned long start ;
int error , neg = 0 ;
2006-11-29 17:33:48 +03:00
log_debug ( ls , " recover %llx " , ( unsigned long long ) rv - > seq ) ;
2006-01-18 12:30:29 +03:00
2006-01-20 11:47:07 +03:00
mutex_lock ( & ls - > ls_recoverd_active ) ;
2006-01-18 12:30:29 +03:00
/*
* Suspending and resuming dlm_astd ensures that no lkb ' s from this ls
* will be processed by dlm_astd during recovery .
*/
dlm_astd_suspend ( ) ;
dlm_astd_resume ( ) ;
/*
2008-01-16 22:02:31 +03:00
* Free non - master tossed rsb ' s . Master rsb ' s are kept on toss
* list and put on root list to be included in resdir recovery .
2006-01-18 12:30:29 +03:00
*/
2008-01-16 22:02:31 +03:00
dlm_clear_toss_list ( ls ) ;
2006-01-18 12:30:29 +03:00
/*
2008-01-16 22:02:31 +03:00
* This list of root rsb ' s will be the basis of most of the recovery
* routines .
2006-01-18 12:30:29 +03:00
*/
2008-01-16 22:02:31 +03:00
dlm_create_root_list ( ls ) ;
2006-01-18 12:30:29 +03:00
/*
* Add or remove nodes from the lockspace ' s ls_nodes list .
* Also waits for all nodes to complete dlm_recover_members .
*/
error = dlm_recover_members ( ls , rv , & neg ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " recover_members failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
start = jiffies ;
/*
* Rebuild our own share of the directory by collecting from all other
* nodes their master rsb names that hash to us .
*/
error = dlm_recover_directory ( ls ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " recover_directory failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
/*
* Wait for all nodes to complete directory rebuild .
*/
error = dlm_recover_directory_wait ( ls ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " recover_directory_wait failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
/*
* We may have outstanding operations that are waiting for a reply from
* a failed node . Mark these to be resent after recovery . Unlock and
* cancel ops can just be completed .
*/
dlm_recover_waiters_pre ( ls ) ;
error = dlm_recovery_stopped ( ls ) ;
if ( error )
goto fail ;
if ( neg | | dlm_no_directory ( ls ) ) {
/*
* Clear lkb ' s for departed nodes .
*/
dlm_purge_locks ( ls ) ;
/*
* Get new master nodeid ' s for rsb ' s that were mastered on
* departed nodes .
*/
error = dlm_recover_masters ( ls ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " recover_masters failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
/*
* Send our locks on remastered rsb ' s to the new masters .
*/
error = dlm_recover_locks ( ls ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " recover_locks failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
error = dlm_recover_locks_wait ( ls ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " recover_locks_wait failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
/*
* Finalize state in master rsb ' s now that all locks can be
* checked . This includes conversion resolution and lvb
* settings .
*/
dlm_recover_rsbs ( ls ) ;
2006-10-31 20:56:01 +03:00
} else {
/*
* Other lockspace members may be going through the " neg " steps
* while also adding us to the lockspace , in which case they ' ll
2006-11-01 18:31:48 +03:00
* be doing the recover_locks ( RS_LOCKS ) barrier .
2006-10-31 20:56:01 +03:00
*/
dlm_set_recover_status ( ls , DLM_RS_LOCKS ) ;
2006-11-01 18:31:48 +03:00
error = dlm_recover_locks_wait ( ls ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " recover_locks_wait failed %d " , error ) ;
2006-11-01 18:31:48 +03:00
goto fail ;
}
2006-01-18 12:30:29 +03:00
}
dlm_release_root_list ( ls ) ;
2006-11-27 20:31:22 +03:00
/*
* Purge directory - related requests that are saved in requestqueue .
* All dir requests from before recovery are invalid now due to the dir
* rebuild and will be resent by the requesting nodes .
*/
dlm_purge_requestqueue ( ls ) ;
2006-01-18 12:30:29 +03:00
dlm_set_recover_status ( ls , DLM_RS_DONE ) ;
error = dlm_recover_done_wait ( ls ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " recover_done_wait failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
dlm_clear_members_gone ( ls ) ;
2007-05-18 17:59:31 +04:00
dlm_adjust_timeouts ( ls ) ;
2006-01-18 12:30:29 +03:00
error = enable_locking ( ls , rv - > seq ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " enable_locking failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
error = dlm_process_requestqueue ( ls ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " process_requestqueue failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
error = dlm_recover_waiters_post ( ls ) ;
if ( error ) {
2007-01-09 18:38:39 +03:00
log_debug ( ls , " recover_waiters_post failed %d " , error ) ;
2006-01-18 12:30:29 +03:00
goto fail ;
}
dlm_grant_after_purge ( ls ) ;
dlm_astd_wake ( ) ;
2006-11-29 17:33:48 +03:00
log_debug ( ls , " recover %llx done: %u ms " ,
( unsigned long long ) rv - > seq ,
2006-01-18 12:30:29 +03:00
jiffies_to_msecs ( jiffies - start ) ) ;
2006-01-20 11:47:07 +03:00
mutex_unlock ( & ls - > ls_recoverd_active ) ;
2006-01-18 12:30:29 +03:00
return 0 ;
fail :
dlm_release_root_list ( ls ) ;
2006-11-29 17:33:48 +03:00
log_debug ( ls , " recover %llx error %d " ,
( unsigned long long ) rv - > seq , error ) ;
2006-01-20 11:47:07 +03:00
mutex_unlock ( & ls - > ls_recoverd_active ) ;
2006-01-18 12:30:29 +03:00
return error ;
}
/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */
static void do_ls_recovery ( struct dlm_ls * ls )
{
struct dlm_recover * rv = NULL ;
spin_lock ( & ls - > ls_recover_lock ) ;
rv = ls - > ls_recover_args ;
ls - > ls_recover_args = NULL ;
2006-10-31 20:56:08 +03:00
if ( rv & & ls - > ls_recover_seq = = rv - > seq )
clear_bit ( LSFL_RECOVERY_STOP , & ls - > ls_flags ) ;
2006-01-18 12:30:29 +03:00
spin_unlock ( & ls - > ls_recover_lock ) ;
if ( rv ) {
ls_recover ( ls , rv ) ;
kfree ( rv - > nodeids ) ;
2008-03-18 22:22:11 +03:00
kfree ( rv - > new ) ;
2006-01-18 12:30:29 +03:00
kfree ( rv ) ;
}
}
static int dlm_recoverd ( void * arg )
{
struct dlm_ls * ls ;
ls = dlm_find_lockspace_local ( arg ) ;
2006-08-24 23:47:20 +04:00
if ( ! ls ) {
log_print ( " dlm_recoverd: no lockspace %p " , arg ) ;
return - 1 ;
}
2006-01-18 12:30:29 +03:00
while ( ! kthread_should_stop ( ) ) {
set_current_state ( TASK_INTERRUPTIBLE ) ;
if ( ! test_bit ( LSFL_WORK , & ls - > ls_flags ) )
schedule ( ) ;
set_current_state ( TASK_RUNNING ) ;
if ( test_and_clear_bit ( LSFL_WORK , & ls - > ls_flags ) )
do_ls_recovery ( ls ) ;
}
dlm_put_lockspace ( ls ) ;
return 0 ;
}
void dlm_recoverd_kick ( struct dlm_ls * ls )
{
set_bit ( LSFL_WORK , & ls - > ls_flags ) ;
wake_up_process ( ls - > ls_recoverd_task ) ;
}
int dlm_recoverd_start ( struct dlm_ls * ls )
{
struct task_struct * p ;
int error = 0 ;
p = kthread_run ( dlm_recoverd , ls , " dlm_recoverd " ) ;
if ( IS_ERR ( p ) )
error = PTR_ERR ( p ) ;
else
ls - > ls_recoverd_task = p ;
return error ;
}
void dlm_recoverd_stop ( struct dlm_ls * ls )
{
kthread_stop ( ls - > ls_recoverd_task ) ;
}
void dlm_recoverd_suspend ( struct dlm_ls * ls )
{
2006-08-09 02:06:07 +04:00
wake_up ( & ls - > ls_wait_general ) ;
2006-01-20 11:47:07 +03:00
mutex_lock ( & ls - > ls_recoverd_active ) ;
2006-01-18 12:30:29 +03:00
}
void dlm_recoverd_resume ( struct dlm_ls * ls )
{
2006-01-20 11:47:07 +03:00
mutex_unlock ( & ls - > ls_recoverd_active ) ;
2006-01-18 12:30:29 +03:00
}