/*
 * Copyright (C) 2015, SUSE
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 */
# include <linux/module.h>
2014-03-07 13:49:26 -06:00
# include <linux/dlm.h>
# include <linux/sched.h>
2014-10-29 18:51:31 -05:00
# include <linux/raid/md_p.h>
2014-03-07 13:49:26 -06:00
# include "md.h"
2014-06-07 00:45:22 -05:00
# include "bitmap.h"
2014-03-29 10:01:53 -05:00
# include "md-cluster.h"
2014-03-07 13:49:26 -06:00
# define LVB_SIZE 64
2014-10-29 18:51:31 -05:00
# define NEW_DEV_TIMEOUT 5000
2014-03-07 13:49:26 -06:00
/* One DLM lock resource plus the bookkeeping needed to lock it
 * synchronously (see dlm_lock_sync()). */
struct dlm_lock_resource {
	dlm_lockspace_t *ls;
	struct dlm_lksb lksb;
	char *name; /* lock name. */
	uint32_t flags; /* flags to pass to dlm_lock() */
	struct completion completion; /* completion for synchronized locking */
	void (*bast) (void *arg, int mode); /* blocking AST function pointer*/
	struct mddev *mddev; /* pointing back to mddev. */
	int mode; /* last mode granted by dlm_lock_sync(); DLM_LOCK_IV if none */
};
/* A sector range [lo, hi] that node @slot is currently resyncing;
 * linked into md_cluster_info->suspend_list. */
struct suspend_info {
	int slot;
	sector_t lo;
	sector_t hi;
	struct list_head list;
};
/* On-wire (LVB) encoding of a resync range, little-endian. */
struct resync_info {
	__le64 lo;
	__le64 hi;
};
2015-03-02 10:55:49 -06:00
/* md_cluster_info flags */
# define MD_CLUSTER_WAITING_FOR_NEWDISK 1
2015-06-24 09:30:32 -05:00
# define MD_CLUSTER_SUSPEND_READ_BALANCING 2
2015-07-10 17:01:21 +08:00
# define MD_CLUSTER_BEGIN_JOIN_CLUSTER 3
2015-03-02 10:55:49 -06:00
/* Per-mddev cluster state: the DLM lockspace, the lock resources used
 * for bitmap ownership and inter-node messaging, plus helper threads. */
struct md_cluster_info {
	/* dlm lock space and resources for clustered raid. */
	dlm_lockspace_t *lockspace;
	int slot_number;		/* this node's DLM slot (1-based) */
	struct completion completion;	/* completed by recover_done() on join */
	struct mutex sb_mutex;
	struct dlm_lock_resource *bitmap_lockres; /* PW lock on our own bitmap */
	struct dlm_lock_resource *resync_lockres; /* held EX while we resync */
	struct list_head suspend_list;	/* ranges peers are resyncing */
	spinlock_t suspend_lock;	/* protects suspend_list */
	struct md_thread *recovery_thread;
	unsigned long recovery_map;	/* bitmask of slots needing bitmap recovery */
	/* communication loc resources */
	struct dlm_lock_resource *ack_lockres;
	struct dlm_lock_resource *message_lockres;
	struct dlm_lock_resource *token_lockres;
	struct dlm_lock_resource *no_new_dev_lockres;
	struct md_thread *recv_thread;
	struct completion newdisk_completion;
	unsigned long state;		/* MD_CLUSTER_* flag bits */
};
/* Types of messages exchanged through the "message" lockres LVB. */
enum msg_type {
	METADATA_UPDATED = 0,	/* superblock changed; peers should reload */
	RESYNCING,		/* sender publishes/clears a resync window */
	NEWDISK,		/* a new device is being added */
	REMOVE,			/* kick a device from the array */
	RE_ADD,			/* re-add a previously Faulty device */
	BITMAP_NEEDS_SYNC,	/* leaving node's bitmap is dirty; please resync */
};
/* Wire format of an inter-node message (copied through the LVB).
 * NOTE(review): senders store some fields via cpu_to_le32/64 while a
 * few readers use them raw (e.g. the switch in process_recvd_msg());
 * the struct should probably use __le32/__le64 throughout -- verify
 * behavior on big-endian hosts. */
struct cluster_msg {
	int type;	/* enum msg_type */
	int slot;	/* sender's 0-based slot */
	/* TODO: Unionize this for smaller footprint */
	sector_t low;	/* resync window start (RESYNCING) */
	sector_t high;	/* resync window end; 0 means resync finished */
	char uuid[16];	/* device uuid (NEWDISK) */
	int raid_slot;	/* target device number */
};
static void sync_ast ( void * arg )
{
struct dlm_lock_resource * res ;
res = ( struct dlm_lock_resource * ) arg ;
complete ( & res - > completion ) ;
}
static int dlm_lock_sync ( struct dlm_lock_resource * res , int mode )
{
int ret = 0 ;
ret = dlm_lock ( res - > ls , mode , & res - > lksb ,
res - > flags , res - > name , strlen ( res - > name ) ,
0 , sync_ast , res , res - > bast ) ;
if ( ret )
return ret ;
wait_for_completion ( & res - > completion ) ;
2015-10-01 13:20:27 -05:00
if ( res - > lksb . sb_status = = 0 )
res - > mode = mode ;
2014-03-07 13:49:26 -06:00
return res - > lksb . sb_status ;
}
/* "Unlock" by converting the lock down to NL (null) mode. */
static int dlm_unlock_sync(struct dlm_lock_resource *res)
{
	return dlm_lock_sync(res, DLM_LOCK_NL);
}
2014-03-29 10:20:02 -05:00
static struct dlm_lock_resource * lockres_init ( struct mddev * mddev ,
2014-03-07 13:49:26 -06:00
char * name , void ( * bastfn ) ( void * arg , int mode ) , int with_lvb )
{
struct dlm_lock_resource * res = NULL ;
int ret , namelen ;
2014-03-29 10:20:02 -05:00
struct md_cluster_info * cinfo = mddev - > cluster_info ;
2014-03-07 13:49:26 -06:00
res = kzalloc ( sizeof ( struct dlm_lock_resource ) , GFP_KERNEL ) ;
if ( ! res )
return NULL ;
2015-07-10 17:01:16 +08:00
init_completion ( & res - > completion ) ;
2014-03-29 10:20:02 -05:00
res - > ls = cinfo - > lockspace ;
res - > mddev = mddev ;
2015-10-01 13:20:27 -05:00
res - > mode = DLM_LOCK_IV ;
2014-03-07 13:49:26 -06:00
namelen = strlen ( name ) ;
res - > name = kzalloc ( namelen + 1 , GFP_KERNEL ) ;
if ( ! res - > name ) {
pr_err ( " md-cluster: Unable to allocate resource name for resource %s \n " , name ) ;
goto out_err ;
}
strlcpy ( res - > name , name , namelen + 1 ) ;
if ( with_lvb ) {
res - > lksb . sb_lvbptr = kzalloc ( LVB_SIZE , GFP_KERNEL ) ;
if ( ! res - > lksb . sb_lvbptr ) {
pr_err ( " md-cluster: Unable to allocate LVB for resource %s \n " , name ) ;
goto out_err ;
}
res - > flags = DLM_LKF_VALBLK ;
}
if ( bastfn )
res - > bast = bastfn ;
res - > flags | = DLM_LKF_EXPEDITE ;
ret = dlm_lock_sync ( res , DLM_LOCK_NL ) ;
if ( ret ) {
pr_err ( " md-cluster: Unable to lock NL on new lock resource %s \n " , name ) ;
goto out_err ;
}
res - > flags & = ~ DLM_LKF_EXPEDITE ;
res - > flags | = DLM_LKF_CONVERT ;
return res ;
out_err :
kfree ( res - > lksb . sb_lvbptr ) ;
kfree ( res - > name ) ;
kfree ( res ) ;
return NULL ;
}
/*
 * Release @res back to the DLM and free it.  Any in-flight request or
 * blocked conversion is cancelled first; the final unlock completion
 * is waited for before freeing the memory.
 */
static void lockres_free(struct dlm_lock_resource *res)
{
	int ret;

	if (!res)
		return;

	/* cancel a lock request or a conversion request that is blocked */
	res->flags |= DLM_LKF_CANCEL;
retry:
	ret = dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
	if (unlikely(ret != 0)) {
		pr_info("%s: failed to unlock %s return %d\n", __func__, res->name, ret);

		/* if a lock conversion is cancelled, then the lock is put
		 * back to grant queue, need to ensure it is unlocked */
		if (ret == -DLM_ECANCEL)
			goto retry;
	}
	res->flags &= ~DLM_LKF_CANCEL;
	wait_for_completion(&res->completion);

	kfree(res->name);
	kfree(res->lksb.sb_lvbptr);
	kfree(res);
}
2014-03-07 11:21:15 -06:00
2014-06-06 12:35:34 -05:00
static void add_resync_info ( struct mddev * mddev , struct dlm_lock_resource * lockres ,
sector_t lo , sector_t hi )
{
struct resync_info * ri ;
ri = ( struct resync_info * ) lockres - > lksb . sb_lvbptr ;
ri - > lo = cpu_to_le64 ( lo ) ;
ri - > hi = cpu_to_le64 ( hi ) ;
}
static struct suspend_info * read_resync_info ( struct mddev * mddev , struct dlm_lock_resource * lockres )
{
struct resync_info ri ;
struct suspend_info * s = NULL ;
sector_t hi = 0 ;
dlm_lock_sync ( lockres , DLM_LOCK_CR ) ;
memcpy ( & ri , lockres - > lksb . sb_lvbptr , sizeof ( struct resync_info ) ) ;
hi = le64_to_cpu ( ri . hi ) ;
if ( ri . hi > 0 ) {
s = kzalloc ( sizeof ( struct suspend_info ) , GFP_KERNEL ) ;
if ( ! s )
goto out ;
s - > hi = hi ;
s - > lo = le64_to_cpu ( ri . lo ) ;
}
dlm_unlock_sync ( lockres ) ;
out :
return s ;
}
2015-02-28 07:04:37 +08:00
static void recover_bitmaps ( struct md_thread * thread )
2014-06-07 00:45:22 -05:00
{
struct mddev * mddev = thread - > mddev ;
struct md_cluster_info * cinfo = mddev - > cluster_info ;
struct dlm_lock_resource * bm_lockres ;
char str [ 64 ] ;
int slot , ret ;
struct suspend_info * s , * tmp ;
sector_t lo , hi ;
while ( cinfo - > recovery_map ) {
slot = fls64 ( ( u64 ) cinfo - > recovery_map ) - 1 ;
/* Clear suspend_area associated with the bitmap */
spin_lock_irq ( & cinfo - > suspend_lock ) ;
list_for_each_entry_safe ( s , tmp , & cinfo - > suspend_list , list )
if ( slot = = s - > slot ) {
list_del ( & s - > list ) ;
kfree ( s ) ;
}
spin_unlock_irq ( & cinfo - > suspend_lock ) ;
snprintf ( str , 64 , " bitmap%04d " , slot ) ;
bm_lockres = lockres_init ( mddev , str , NULL , 1 ) ;
if ( ! bm_lockres ) {
pr_err ( " md-cluster: Cannot initialize bitmaps \n " ) ;
goto clear_bit ;
}
ret = dlm_lock_sync ( bm_lockres , DLM_LOCK_PW ) ;
if ( ret ) {
pr_err ( " md-cluster: Could not DLM lock %s: %d \n " ,
str , ret ) ;
goto clear_bit ;
}
2015-04-14 10:45:42 -05:00
ret = bitmap_copy_from_slot ( mddev , slot , & lo , & hi , true ) ;
2014-06-07 00:52:29 -05:00
if ( ret ) {
2014-06-07 00:45:22 -05:00
pr_err ( " md-cluster: Could not copy data from bitmap %d \n " , slot ) ;
2014-06-07 00:52:29 -05:00
goto dlm_unlock ;
}
if ( hi > 0 ) {
/* TODO:Wait for current resync to get over */
set_bit ( MD_RECOVERY_NEEDED , & mddev - > recovery ) ;
if ( lo < mddev - > recovery_cp )
mddev - > recovery_cp = lo ;
md_check_recovery ( mddev ) ;
}
dlm_unlock :
2014-06-07 00:45:22 -05:00
dlm_unlock_sync ( bm_lockres ) ;
clear_bit :
clear_bit ( slot , & cinfo - > recovery_map ) ;
}
}
2014-03-30 00:42:49 -05:00
static void recover_prep ( void * arg )
{
2015-06-24 09:30:32 -05:00
struct mddev * mddev = arg ;
struct md_cluster_info * cinfo = mddev - > cluster_info ;
set_bit ( MD_CLUSTER_SUSPEND_READ_BALANCING , & cinfo - > state ) ;
2014-03-30 00:42:49 -05:00
}
2015-07-10 16:54:03 +08:00
static void __recover_slot ( struct mddev * mddev , int slot )
2014-03-30 00:42:49 -05:00
{
struct md_cluster_info * cinfo = mddev - > cluster_info ;
2015-07-10 16:54:03 +08:00
set_bit ( slot , & cinfo - > recovery_map ) ;
2014-06-07 00:45:22 -05:00
if ( ! cinfo - > recovery_thread ) {
cinfo - > recovery_thread = md_register_thread ( recover_bitmaps ,
mddev , " recover " ) ;
if ( ! cinfo - > recovery_thread ) {
pr_warn ( " md-cluster: Could not create recovery thread \n " ) ;
return ;
}
}
md_wakeup_thread ( cinfo - > recovery_thread ) ;
2014-03-30 00:42:49 -05:00
}
/* dlm_lockspace_ops callback: a peer node went down; schedule recovery
 * of its bitmap. */
static void recover_slot(void *arg, struct dlm_slot *slot)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
			mddev->bitmap_info.cluster_name,
			slot->nodeid, slot->slot,
			cinfo->slot_number);
	/* deduct one since dlm slot starts from one while the num of
	   cluster-md begins with 0 */
	__recover_slot(mddev, slot->slot - 1);
}
/* dlm_lockspace_ops callback: lock recovery finished.  Records our slot
 * number; on the initial join it also releases the waiter in join(). */
static void recover_done(void *arg, struct dlm_slot *slots,
		int num_slots, int our_slot,
		uint32_t generation)
{
	struct mddev *mddev = arg;
	struct md_cluster_info *cinfo = mddev->cluster_info;

	cinfo->slot_number = our_slot;
	/* completion is only need to be complete when node join cluster,
	 * it doesn't need to run during another node's failure */
	if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) {
		complete(&cinfo->completion);
		clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
	}
	/* recovery is over; read balancing may resume */
	clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
/* These ops are invoked when a node joins the cluster, and perform lock
 * recovery when a node failure occurs. */
static const struct dlm_lockspace_ops md_ls_ops = {
	.recover_prep = recover_prep,
	.recover_slot = recover_slot,
	.recover_done = recover_done,
};
2014-06-07 01:08:29 -05:00
/*
* The BAST function for the ack lock resource
* This function wakes up the receive thread in
* order to receive and process the message .
*/
static void ack_bast ( void * arg , int mode )
{
struct dlm_lock_resource * res = ( struct dlm_lock_resource * ) arg ;
struct md_cluster_info * cinfo = res - > mddev - > cluster_info ;
if ( mode = = DLM_LOCK_EX )
md_wakeup_thread ( cinfo - > recv_thread ) ;
}
2014-06-07 02:30:30 -05:00
static void __remove_suspend_info ( struct md_cluster_info * cinfo , int slot )
{
struct suspend_info * s , * tmp ;
list_for_each_entry_safe ( s , tmp , & cinfo - > suspend_list , list )
if ( slot = = s - > slot ) {
list_del ( & s - > list ) ;
kfree ( s ) ;
break ;
}
}
/* Remove the suspend window published by @slot and let the array know
 * the suspended region changed. */
static void remove_suspend_info(struct mddev *mddev, int slot)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	spin_lock_irq(&cinfo->suspend_lock);
	__remove_suspend_info(cinfo, slot);
	spin_unlock_irq(&cinfo->suspend_lock);
	/* NOTE(review): quiesce(..., 2) appears to kick I/O waiting on the
	 * now-smaller suspend list -- confirm against pers->quiesce contract */
	mddev->pers->quiesce(mddev, 2);
}
/*
 * Handle a RESYNCING message.  hi == 0 means the sender finished: drop
 * its suspend window and kick our own recovery.  Otherwise record the
 * window [lo, hi], quiescing the array around the update so in-flight
 * I/O drains before the range is treated as suspended.
 */
static void process_suspend_info(struct mddev *mddev,
		int slot, sector_t lo, sector_t hi)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct suspend_info *s;

	if (!hi) {
		remove_suspend_info(mddev, slot);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return;
	}
	s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
	if (!s)
		return;	/* NOTE(review): window silently dropped on OOM */
	s->slot = slot;
	s->lo = lo;
	s->hi = hi;
	mddev->pers->quiesce(mddev, 1);
	mddev->pers->quiesce(mddev, 0);
	spin_lock_irq(&cinfo->suspend_lock);
	/* Remove existing entry (if exists) before adding */
	__remove_suspend_info(cinfo, slot);
	list_add(&s->list, &cinfo->suspend_list);
	spin_unlock_irq(&cinfo->suspend_lock);
	mddev->pers->quiesce(mddev, 2);
}
2014-10-29 18:51:31 -05:00
static void process_add_new_disk ( struct mddev * mddev , struct cluster_msg * cmsg )
{
char disk_uuid [ 64 ] ;
struct md_cluster_info * cinfo = mddev - > cluster_info ;
char event_name [ ] = " EVENT=ADD_DEVICE " ;
char raid_slot [ 16 ] ;
char * envp [ ] = { event_name , disk_uuid , raid_slot , NULL } ;
int len ;
len = snprintf ( disk_uuid , 64 , " DEVICE_UUID= " ) ;
2015-07-10 16:54:02 +08:00
sprintf ( disk_uuid + len , " %pU " , cmsg - > uuid ) ;
2015-10-12 17:21:21 +08:00
snprintf ( raid_slot , 16 , " RAID_DISK=%d " , le32_to_cpu ( cmsg - > raid_slot ) ) ;
2014-10-29 18:51:31 -05:00
pr_info ( " %s:%d Sending kobject change with %s and %s \n " , __func__ , __LINE__ , disk_uuid , raid_slot ) ;
init_completion ( & cinfo - > newdisk_completion ) ;
2015-03-02 10:55:49 -06:00
set_bit ( MD_CLUSTER_WAITING_FOR_NEWDISK , & cinfo - > state ) ;
2014-10-29 18:51:31 -05:00
kobject_uevent_env ( & disk_to_dev ( mddev - > gendisk ) - > kobj , KOBJ_CHANGE , envp ) ;
wait_for_completion_timeout ( & cinfo - > newdisk_completion ,
NEW_DEV_TIMEOUT ) ;
2015-03-02 10:55:49 -06:00
clear_bit ( MD_CLUSTER_WAITING_FOR_NEWDISK , & cinfo - > state ) ;
2014-10-29 18:51:31 -05:00
}
/* Handle METADATA_UPDATED: reload the superblock for the device the
 * sender names, then (re)take CR on no-new-dev.
 * NOTE(review): the CR re-lock presumably pairs with a release done in
 * the add-new-disk path -- confirm. */
static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	md_reload_sb(mddev, le32_to_cpu(msg->raid_slot));
	dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
}
2015-04-14 10:44:44 -05:00
static void process_remove_disk ( struct mddev * mddev , struct cluster_msg * msg )
{
2015-10-12 17:21:21 +08:00
struct md_rdev * rdev = md_find_rdev_nr_rcu ( mddev ,
le32_to_cpu ( msg - > raid_slot ) ) ;
2015-04-14 10:44:44 -05:00
if ( rdev )
md_kick_rdev_from_array ( rdev ) ;
else
2015-10-12 17:21:21 +08:00
pr_warn ( " %s: %d Could not find disk(%d) to REMOVE \n " ,
__func__ , __LINE__ , le32_to_cpu ( msg - > raid_slot ) ) ;
2015-04-14 10:44:44 -05:00
}
2015-04-14 10:45:42 -05:00
static void process_readd_disk ( struct mddev * mddev , struct cluster_msg * msg )
{
2015-10-12 17:21:21 +08:00
struct md_rdev * rdev = md_find_rdev_nr_rcu ( mddev ,
le32_to_cpu ( msg - > raid_slot ) ) ;
2015-04-14 10:45:42 -05:00
if ( rdev & & test_bit ( Faulty , & rdev - > flags ) )
clear_bit ( Faulty , & rdev - > flags ) ;
else
2015-10-12 17:21:21 +08:00
pr_warn ( " %s: %d Could not find disk(%d) which is faulty " ,
__func__ , __LINE__ , le32_to_cpu ( msg - > raid_slot ) ) ;
2015-04-14 10:45:42 -05:00
}
/*
 * Dispatch one message read out of the message lockres LVB.
 * NOTE(review): type/slot/low/high are read raw here although senders
 * store them via cpu_to_le32/64 (see __sendmsg() callers) -- this looks
 * broken on big-endian hosts; confirm and convert consistently.
 */
static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
{
	/* ignore a message that looped back to its sender */
	if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
		"node %d received it's own msg\n", le32_to_cpu(msg->slot)))
		return;
	switch (msg->type) {
	case METADATA_UPDATED:
		process_metadata_update(mddev, msg);
		break;
	case RESYNCING:
		process_suspend_info(mddev, msg->slot,
				msg->low, msg->high);
		break;
	case NEWDISK:
		process_add_new_disk(mddev, msg);
		break;
	case REMOVE:
		process_remove_disk(mddev, msg);
		break;
	case RE_ADD:
		process_readd_disk(mddev, msg);
		break;
	case BITMAP_NEEDS_SYNC:
		__recover_slot(mddev, msg->slot);
		break;
	default:
		pr_warn("%s:%d Received unknown message from %d\n",
			__func__, __LINE__, msg->slot);
	}
}
/*
 * thread for receiving message
 *
 * Woken by ack_bast().  Reads one message from the message lockres LVB,
 * processes it, then walks the ack/message lock modes back to their
 * resting state (CR on both) -- the receiving counterpart of the
 * protocol documented above __sendmsg().
 */
static void recv_daemon(struct md_thread *thread)
{
	struct md_cluster_info *cinfo = thread->mddev->cluster_info;
	struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
	struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
	struct cluster_msg msg;
	int ret;

	/*get CR on Message*/
	if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
		pr_err("md/raid1:failed to get CR on MESSAGE\n");
		return;
	}

	/* read lvb and wake up thread to process this message_lockres */
	memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
	process_recvd_msg(thread->mddev, &msg);

	/*release CR on ack_lockres*/
	ret = dlm_unlock_sync(ack_lockres);
	if (unlikely(ret != 0))
		pr_info("unlock ack failed return %d\n", ret);
	/*up-convert to PR on message_lockres*/
	ret = dlm_lock_sync(message_lockres, DLM_LOCK_PR);
	if (unlikely(ret != 0))
		pr_info("lock PR on msg failed return %d\n", ret);
	/*get CR on ack_lockres again*/
	ret = dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
	if (unlikely(ret != 0))
		pr_info("lock CR on ack failed return %d\n", ret);
	/*release CR on message_lockres*/
	ret = dlm_unlock_sync(message_lockres);
	if (unlikely(ret != 0))
		pr_info("unlock msg failed return %d\n", ret);
}
/* lock_comm()
 * Takes the lock on the TOKEN lock resource so no other
 * node can communicate while the operation is underway.
 * If called again, and the TOKEN lock is already in EX mode
 * return success. However, care must be taken that unlock_comm()
 * is called only once.
 */
static int lock_comm(struct md_cluster_info *cinfo)
{
	int error;

	/* already holding the token (nested metadata-update path) */
	if (cinfo->token_lockres->mode == DLM_LOCK_EX)
		return 0;

	error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
	if (error)
		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
				__func__, __LINE__, error);
	return error;
}
/* Release the TOKEN lock taken by lock_comm(). */
static void unlock_comm(struct md_cluster_info *cinfo)
{
	WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX);
	dlm_unlock_sync(cinfo->token_lockres);
}
/* __sendmsg()
 * This function performs the actual sending of the message. This function is
 * usually called after performing the encompassing operation
 * The function:
 * 1. Grabs the message lockresource in EX mode
 * 2. Copies the message to the message LVB
 * 3. Downconverts message lockresource to CW
 * 4. Upconverts ack lock resource from CR to EX. This forces the BAST on other nodes
 *    and the other nodes read the message. The thread will wait here until all other
 *    nodes have released ack lock resource.
 * 5. Downconvert ack lockresource to CR
 */
static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
{
	int error;
	int slot = cinfo->slot_number - 1;

	cmsg->slot = cpu_to_le32(slot);
	/*get EX on Message*/
	error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
	if (error) {
		pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error);
		goto failed_message;
	}

	memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
			sizeof(struct cluster_msg));
	/*down-convert EX to CW on Message*/
	error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW);
	if (error) {
		pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n",
				error);
		goto failed_ack;
	}

	/*up-convert CR to EX on Ack*/
	error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX);
	if (error) {
		pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n",
				error);
		goto failed_ack;
	}

	/*down-convert EX to CR on Ack*/
	error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
	if (error) {
		pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n",
				error);
		goto failed_ack;
	}

failed_ack:
	/* deliberately retried until it succeeds: the message lock MUST
	 * end up released or the whole messaging protocol wedges */
	error = dlm_unlock_sync(cinfo->message_lockres);
	if (unlikely(error != 0)) {
		pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n",
			error);
		/* in case the message can't be released due to some reason */
		goto failed_ack;
	}
failed_message:
	return error;
}
/*
 * Serialized message send: take the TOKEN lock, send, release.
 * Fix: a lock_comm() failure was ignored, so we could fall through to
 * __sendmsg()/unlock_comm() without actually holding the TOKEN lock.
 */
static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
{
	int ret;

	ret = lock_comm(cinfo);
	if (ret)
		return ret;
	ret = __sendmsg(cinfo, cmsg);
	unlock_comm(cinfo);
	return ret;
}
2014-06-06 12:35:34 -05:00
static int gather_all_resync_info ( struct mddev * mddev , int total_slots )
{
struct md_cluster_info * cinfo = mddev - > cluster_info ;
int i , ret = 0 ;
struct dlm_lock_resource * bm_lockres ;
struct suspend_info * s ;
char str [ 64 ] ;
2015-07-10 17:01:22 +08:00
sector_t lo , hi ;
2014-06-06 12:35:34 -05:00
for ( i = 0 ; i < total_slots ; i + + ) {
memset ( str , ' \0 ' , 64 ) ;
snprintf ( str , 64 , " bitmap%04d " , i ) ;
bm_lockres = lockres_init ( mddev , str , NULL , 1 ) ;
if ( ! bm_lockres )
return - ENOMEM ;
if ( i = = ( cinfo - > slot_number - 1 ) )
continue ;
bm_lockres - > flags | = DLM_LKF_NOQUEUE ;
ret = dlm_lock_sync ( bm_lockres , DLM_LOCK_PW ) ;
if ( ret = = - EAGAIN ) {
memset ( bm_lockres - > lksb . sb_lvbptr , ' \0 ' , LVB_SIZE ) ;
s = read_resync_info ( mddev , bm_lockres ) ;
if ( s ) {
pr_info ( " %s:%d Resync[%llu..%llu] in progress on %d \n " ,
__func__ , __LINE__ ,
( unsigned long long ) s - > lo ,
( unsigned long long ) s - > hi , i ) ;
spin_lock_irq ( & cinfo - > suspend_lock ) ;
s - > slot = i ;
list_add ( & s - > list , & cinfo - > suspend_list ) ;
spin_unlock_irq ( & cinfo - > suspend_lock ) ;
}
ret = 0 ;
lockres_free ( bm_lockres ) ;
continue ;
}
2015-07-10 17:01:20 +08:00
if ( ret ) {
lockres_free ( bm_lockres ) ;
2014-06-06 12:35:34 -05:00
goto out ;
2015-07-10 17:01:20 +08:00
}
2015-07-10 17:01:22 +08:00
/* Read the disk bitmap sb and check if it needs recovery */
ret = bitmap_copy_from_slot ( mddev , i , & lo , & hi , false ) ;
if ( ret ) {
pr_warn ( " md-cluster: Could not gather bitmaps from slot %d " , i ) ;
lockres_free ( bm_lockres ) ;
continue ;
}
if ( ( hi > 0 ) & & ( lo < mddev - > recovery_cp ) ) {
set_bit ( MD_RECOVERY_NEEDED , & mddev - > recovery ) ;
mddev - > recovery_cp = lo ;
md_check_recovery ( mddev ) ;
}
2014-06-06 12:35:34 -05:00
dlm_unlock_sync ( bm_lockres ) ;
lockres_free ( bm_lockres ) ;
}
out :
return ret ;
}
2014-03-29 10:01:53 -05:00
static int join ( struct mddev * mddev , int nodes )
{
2014-03-29 10:20:02 -05:00
struct md_cluster_info * cinfo ;
2014-03-30 00:42:49 -05:00
int ret , ops_rv ;
2014-03-29 10:20:02 -05:00
char str [ 64 ] ;
cinfo = kzalloc ( sizeof ( struct md_cluster_info ) , GFP_KERNEL ) ;
if ( ! cinfo )
return - ENOMEM ;
2015-07-10 17:01:18 +08:00
INIT_LIST_HEAD ( & cinfo - > suspend_list ) ;
spin_lock_init ( & cinfo - > suspend_lock ) ;
2014-03-30 00:42:49 -05:00
init_completion ( & cinfo - > completion ) ;
2015-07-10 17:01:21 +08:00
set_bit ( MD_CLUSTER_BEGIN_JOIN_CLUSTER , & cinfo - > state ) ;
2014-03-30 00:42:49 -05:00
mutex_init ( & cinfo - > sb_mutex ) ;
mddev - > cluster_info = cinfo ;
2014-03-29 10:20:02 -05:00
memset ( str , 0 , 64 ) ;
2015-07-10 16:54:02 +08:00
sprintf ( str , " %pU " , mddev - > uuid ) ;
2014-03-30 00:42:49 -05:00
ret = dlm_new_lockspace ( str , mddev - > bitmap_info . cluster_name ,
DLM_LSFL_FS , LVB_SIZE ,
& md_ls_ops , mddev , & ops_rv , & cinfo - > lockspace ) ;
2014-03-29 10:20:02 -05:00
if ( ret )
goto err ;
2014-03-30 00:42:49 -05:00
wait_for_completion ( & cinfo - > completion ) ;
2015-04-21 11:25:52 -05:00
if ( nodes < cinfo - > slot_number ) {
pr_err ( " md-cluster: Slot allotted(%d) is greater than available slots(%d). " ,
cinfo - > slot_number , nodes ) ;
2014-06-06 11:50:56 -05:00
ret = - ERANGE ;
goto err ;
}
2014-06-07 01:08:29 -05:00
/* Initiate the communication resources */
ret = - ENOMEM ;
cinfo - > recv_thread = md_register_thread ( recv_daemon , mddev , " cluster_recv " ) ;
if ( ! cinfo - > recv_thread ) {
pr_err ( " md-cluster: cannot allocate memory for recv_thread! \n " ) ;
goto err ;
}
cinfo - > message_lockres = lockres_init ( mddev , " message " , NULL , 1 ) ;
if ( ! cinfo - > message_lockres )
goto err ;
cinfo - > token_lockres = lockres_init ( mddev , " token " , NULL , 0 ) ;
if ( ! cinfo - > token_lockres )
goto err ;
cinfo - > ack_lockres = lockres_init ( mddev , " ack " , ack_bast , 0 ) ;
if ( ! cinfo - > ack_lockres )
goto err ;
2014-10-29 18:51:31 -05:00
cinfo - > no_new_dev_lockres = lockres_init ( mddev , " no-new-dev " , NULL , 0 ) ;
if ( ! cinfo - > no_new_dev_lockres )
goto err ;
2014-06-07 01:08:29 -05:00
/* get sync CR lock on ACK. */
if ( dlm_lock_sync ( cinfo - > ack_lockres , DLM_LOCK_CR ) )
pr_err ( " md-cluster: failed to get a sync CR lock on ACK!(%d) \n " ,
ret ) ;
2014-10-29 18:51:31 -05:00
/* get sync CR lock on no-new-dev. */
if ( dlm_lock_sync ( cinfo - > no_new_dev_lockres , DLM_LOCK_CR ) )
pr_err ( " md-cluster: failed to get a sync CR lock on no-new-dev!(%d) \n " , ret ) ;
2014-06-06 12:12:32 -05:00
pr_info ( " md-cluster: Joined cluster %s slot %d \n " , str , cinfo - > slot_number ) ;
snprintf ( str , 64 , " bitmap%04d " , cinfo - > slot_number - 1 ) ;
cinfo - > bitmap_lockres = lockres_init ( mddev , str , NULL , 1 ) ;
if ( ! cinfo - > bitmap_lockres )
goto err ;
if ( dlm_lock_sync ( cinfo - > bitmap_lockres , DLM_LOCK_PW ) ) {
pr_err ( " Failed to get bitmap lock \n " ) ;
ret = - EINVAL ;
goto err ;
}
2015-09-30 13:20:35 -05:00
cinfo - > resync_lockres = lockres_init ( mddev , " resync " , NULL , 0 ) ;
if ( ! cinfo - > resync_lockres )
goto err ;
2014-06-06 12:35:34 -05:00
ret = gather_all_resync_info ( mddev , nodes ) ;
if ( ret )
goto err ;
2014-03-29 10:01:53 -05:00
return 0 ;
2014-03-29 10:20:02 -05:00
err :
2014-06-07 01:08:29 -05:00
lockres_free ( cinfo - > message_lockres ) ;
lockres_free ( cinfo - > token_lockres ) ;
lockres_free ( cinfo - > ack_lockres ) ;
2014-10-29 18:51:31 -05:00
lockres_free ( cinfo - > no_new_dev_lockres ) ;
2015-09-30 13:20:35 -05:00
lockres_free ( cinfo - > resync_lockres ) ;
2014-06-06 12:35:34 -05:00
lockres_free ( cinfo - > bitmap_lockres ) ;
2014-03-29 10:20:02 -05:00
if ( cinfo - > lockspace )
dlm_release_lockspace ( cinfo - > lockspace , 2 ) ;
2014-03-30 00:42:49 -05:00
mddev - > cluster_info = NULL ;
2014-03-29 10:20:02 -05:00
kfree ( cinfo ) ;
return ret ;
2014-03-29 10:01:53 -05:00
}
/* Ask the remaining nodes to resync this node's (dirty) bitmap; called
 * from leave(). */
static void resync_bitmap(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct cluster_msg cmsg = {0};
	int err;

	cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC);
	err = sendmsg(cinfo, &cmsg);
	if (err)
		pr_err("%s:%d: failed to send BITMAP_NEEDS_SYNC message (%d)\n",
			__func__, __LINE__, err);
}
2014-03-29 10:01:53 -05:00
static int leave ( struct mddev * mddev )
{
2014-03-29 10:20:02 -05:00
struct md_cluster_info * cinfo = mddev - > cluster_info ;
if ( ! cinfo )
return 0 ;
2015-10-01 00:09:18 +08:00
/* BITMAP_NEEDS_SYNC message should be sent when node
* is leaving the cluster with dirty bitmap , also we
* can only deliver it when dlm connection is available */
if ( cinfo - > slot_number > 0 & & mddev - > recovery_cp ! = MaxSector )
resync_bitmap ( mddev ) ;
2014-06-07 00:45:22 -05:00
md_unregister_thread ( & cinfo - > recovery_thread ) ;
2014-06-07 01:08:29 -05:00
md_unregister_thread ( & cinfo - > recv_thread ) ;
lockres_free ( cinfo - > message_lockres ) ;
lockres_free ( cinfo - > token_lockres ) ;
lockres_free ( cinfo - > ack_lockres ) ;
2014-10-29 18:51:31 -05:00
lockres_free ( cinfo - > no_new_dev_lockres ) ;
2014-06-06 12:12:32 -05:00
lockres_free ( cinfo - > bitmap_lockres ) ;
2014-03-29 10:20:02 -05:00
dlm_release_lockspace ( cinfo - > lockspace , 2 ) ;
2014-03-29 10:01:53 -05:00
return 0 ;
}
/* slot_number(): Returns the MD slot number to use
 * DLM starts the slot numbers from 1, whereas cluster-md
 * wants the number to be from zero, so we deduct one
 */
static int slot_number(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	return cinfo->slot_number - 1;
}
/* Take the TOKEN lock before a metadata update; paired with
 * metadata_update_finish() or metadata_update_cancel(). */
static int metadata_update_start(struct mddev *mddev)
{
	return lock_comm(mddev->cluster_info);
}
/* Broadcast METADATA_UPDATED (carrying an active device number for the
 * peers to reload) and drop the TOKEN lock taken in
 * metadata_update_start(). */
static int metadata_update_finish(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct cluster_msg cmsg;
	struct md_rdev *rdev;
	int ret = 0;
	int raid_slot = -1;

	memset(&cmsg, 0, sizeof(cmsg));
	cmsg.type = cpu_to_le32(METADATA_UPDATED);
	/* Pick up a good active device number to send.
	 */
	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
			raid_slot = rdev->desc_nr;
			break;
		}
	if (raid_slot >= 0) {
		cmsg.raid_slot = cpu_to_le32(raid_slot);
		ret = __sendmsg(cinfo, &cmsg);
	} else
		pr_warn("md-cluster: No good device id found to send\n");
	unlock_comm(cinfo);
	return ret;
}
/* Abort a metadata update: just release the TOKEN lock. */
static void metadata_update_cancel(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	unlock_comm(cinfo);
}
/* Try to become the resyncing node: take resync_lockres in EX without
 * queueing, so this fails fast if a peer is already resyncing. */
static int resync_start(struct mddev *mddev)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	cinfo->resync_lockres->flags |= DLM_LKF_NOQUEUE;
	return dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX);
}
2015-08-19 08:14:42 +10:00
/*
 * Publish the [lo, hi) range currently being resynced: record it in the
 * bitmap lock's LVB and broadcast a RESYNCING message to the other nodes.
 */
static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct cluster_msg cmsg = {
		.type = cpu_to_le32(RESYNCING),
		.low  = cpu_to_le64(lo),
		.high = cpu_to_le64(hi),
	};

	add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi);
	/* Re-acquire the lock to refresh LVB */
	dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);

	return sendmsg(cinfo, &cmsg);
}
2015-09-30 13:20:35 -05:00
static int resync_finish ( struct mddev * mddev )
{
struct md_cluster_info * cinfo = mddev - > cluster_info ;
cinfo - > resync_lockres - > flags & = ~ DLM_LKF_NOQUEUE ;
dlm_unlock_sync ( cinfo - > resync_lockres ) ;
return resync_info_update ( mddev , 0 , 0 ) ;
}
2015-06-24 09:30:32 -05:00
/*
 * Return 1 if [lo, hi) overlaps an area another node is resyncing (or,
 * for READs, if read balancing is suspended cluster-wide), 0 otherwise.
 */
static int area_resyncing(struct mddev *mddev, int direction,
		sector_t lo, sector_t hi)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;
	struct suspend_info *s;
	int ret = 0;

	if (direction == READ &&
	    test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
		return 1;

	spin_lock_irq(&cinfo->suspend_lock);
	list_for_each_entry(s, &cinfo->suspend_list, list) {
		/* half-open interval overlap test */
		if (hi > s->lo && lo < s->hi) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irq(&cinfo->suspend_lock);
	return ret;
}
2015-10-01 13:20:27 -05:00
/* add_new_disk() - initiates a disk add
* However , if this fails before writing md_update_sb ( ) ,
* add_new_disk_cancel ( ) must be called to release token lock
*/
static int add_new_disk ( struct mddev * mddev , struct md_rdev * rdev )
2014-10-29 18:51:31 -05:00
{
struct md_cluster_info * cinfo = mddev - > cluster_info ;
struct cluster_msg cmsg ;
int ret = 0 ;
struct mdp_superblock_1 * sb = page_address ( rdev - > sb_page ) ;
char * uuid = sb - > device_uuid ;
memset ( & cmsg , 0 , sizeof ( cmsg ) ) ;
cmsg . type = cpu_to_le32 ( NEWDISK ) ;
memcpy ( cmsg . uuid , uuid , 16 ) ;
2015-10-12 17:21:21 +08:00
cmsg . raid_slot = cpu_to_le32 ( rdev - > desc_nr ) ;
2014-10-29 18:51:31 -05:00
lock_comm ( cinfo ) ;
ret = __sendmsg ( cinfo , & cmsg ) ;
if ( ret )
return ret ;
cinfo - > no_new_dev_lockres - > flags | = DLM_LKF_NOQUEUE ;
ret = dlm_lock_sync ( cinfo - > no_new_dev_lockres , DLM_LOCK_EX ) ;
cinfo - > no_new_dev_lockres - > flags & = ~ DLM_LKF_NOQUEUE ;
/* Some node does not "see" the device */
if ( ret = = - EAGAIN )
ret = - ENOENT ;
2015-10-01 13:20:27 -05:00
if ( ret )
unlock_comm ( cinfo ) ;
2014-10-29 18:51:31 -05:00
else
dlm_lock_sync ( cinfo - > no_new_dev_lockres , DLM_LOCK_CR ) ;
return ret ;
}
2015-10-01 13:20:27 -05:00
static void add_new_disk_cancel ( struct mddev * mddev )
2014-10-29 18:51:31 -05:00
{
2015-10-01 13:20:27 -05:00
struct md_cluster_info * cinfo = mddev - > cluster_info ;
unlock_comm ( cinfo ) ;
2014-10-29 18:51:31 -05:00
}
2015-03-02 10:55:49 -06:00
/*
 * Acknowledge (or reject) a NEWDISK request pending on this node.
 * Returns -EINVAL if no such request is outstanding.
 */
static int new_disk_ack(struct mddev *mddev, bool ack)
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	/* Only meaningful while a NEWDISK request is being waited on */
	if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) {
		pr_warn("md-cluster(%s): Spurious cluster confirmation\n",
			mdname(mddev));
		return -EINVAL;
	}

	if (ack)
		dlm_unlock_sync(cinfo->no_new_dev_lockres);
	complete(&cinfo->newdisk_completion);
	return 0;
}
2015-04-14 10:44:44 -05:00
static int remove_disk ( struct mddev * mddev , struct md_rdev * rdev )
{
2015-10-12 17:21:24 +08:00
struct cluster_msg cmsg = { 0 } ;
2015-04-14 10:44:44 -05:00
struct md_cluster_info * cinfo = mddev - > cluster_info ;
2015-10-12 17:21:21 +08:00
cmsg . type = cpu_to_le32 ( REMOVE ) ;
cmsg . raid_slot = cpu_to_le32 ( rdev - > desc_nr ) ;
2015-04-14 10:44:44 -05:00
return __sendmsg ( cinfo , & cmsg ) ;
}
2015-04-14 10:45:42 -05:00
static int gather_bitmaps ( struct md_rdev * rdev )
{
int sn , err ;
sector_t lo , hi ;
2015-10-12 17:21:24 +08:00
struct cluster_msg cmsg = { 0 } ;
2015-04-14 10:45:42 -05:00
struct mddev * mddev = rdev - > mddev ;
struct md_cluster_info * cinfo = mddev - > cluster_info ;
2015-10-12 17:21:21 +08:00
cmsg . type = cpu_to_le32 ( RE_ADD ) ;
cmsg . raid_slot = cpu_to_le32 ( rdev - > desc_nr ) ;
2015-04-14 10:45:42 -05:00
err = sendmsg ( cinfo , & cmsg ) ;
if ( err )
goto out ;
for ( sn = 0 ; sn < mddev - > bitmap_info . nodes ; sn + + ) {
if ( sn = = ( cinfo - > slot_number - 1 ) )
continue ;
err = bitmap_copy_from_slot ( mddev , sn , & lo , & hi , false ) ;
if ( err ) {
pr_warn ( " md-cluster: Could not gather bitmaps from slot %d " , sn ) ;
goto out ;
}
if ( ( hi > 0 ) & & ( lo < mddev - > recovery_cp ) )
mddev - > recovery_cp = lo ;
}
out :
return err ;
}
2014-03-29 10:01:53 -05:00
/*
 * Cluster operations exported to the MD core; registered with
 * register_md_cluster_operations() in cluster_init().
 */
static struct md_cluster_operations cluster_ops = {
	.join = join,
	.leave = leave,
	.slot_number = slot_number,
	.resync_start = resync_start,
	.resync_finish = resync_finish,
	.resync_info_update = resync_info_update,
	.metadata_update_start = metadata_update_start,
	.metadata_update_finish = metadata_update_finish,
	.metadata_update_cancel = metadata_update_cancel,
	.area_resyncing = area_resyncing,
	.add_new_disk = add_new_disk,
	.add_new_disk_cancel = add_new_disk_cancel,
	.new_disk_ack = new_disk_ack,
	.remove_disk = remove_disk,
	.gather_bitmaps = gather_bitmaps,
};
2014-03-07 11:21:15 -06:00
/* Module init: advertise the cluster ops table to the MD core. */
static int __init cluster_init(void)
{
	pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n");
	pr_info("Registering Cluster MD functions\n");
	register_md_cluster_operations(&cluster_ops, THIS_MODULE);
	return 0;
}
/* Module exit: withdraw the cluster ops table from the MD core. */
static void cluster_exit(void)
{
	unregister_md_cluster_operations();
}
module_init(cluster_init);
module_exit(cluster_exit);

/* Module metadata */
MODULE_AUTHOR("SUSE");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Clustering support for MD");