
dmeventd: rework raid plugin

Switch to per-thread mem-pool and lock only for lvm2 calls.
Use libdm parser for raid status line.
Zdenek Kabelac 2015-10-13 11:37:14 +02:00
parent 256e432e78
commit 4b586ad3c2
2 changed files with 29 additions and 88 deletions
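For reference, the status text the plugin parses is the <params> portion of the raid target's "dmsetup status" output. The following standalone sketch (not part of the commit) shows the libdm status parsing the plugin switches to; the params string, pool name and chunk size are made-up example values, and the program links with -ldevmapper:

#include <stdio.h>
#include <string.h>
#include <libdevmapper.h>

int main(void)
{
	/* Made-up example of a dm-raid status <params> string:
	 * <raid_type> <#devs> <health_chars> <sync_ratio> <sync_action> <mismatch_cnt>
	 * 'A' = alive and in-sync, 'a' = alive but syncing, 'D' = dead/failed. */
	const char *params = "raid1 2 AD 145920/291840 idle 0";
	struct dm_pool *mem;
	struct dm_status_raid *status;
	const char *d;

	if (!(mem = dm_pool_create("raid_example", 1024)))
		return 1;

	if (!dm_get_status_raid(mem, params, &status)) {
		fprintf(stderr, "Failed to parse raid params.\n");
		dm_pool_destroy(mem);
		return 1;
	}

	/* Same checks the reworked plugin performs on each event. */
	if ((d = strchr(status->dev_health, 'D')))
		printf("Device #%d of %s array has failed.\n",
		       (int)(d - status->dev_health), status->raid_type);
	else
		printf("%s array is %s in-sync.\n", status->raid_type,
		       (status->insync_regions == status->total_regions) ? "now" : "not");

	dm_pool_free(mem, status);	/* drops status and anything pool-allocated after it */
	dm_pool_destroy(mem);

	return 0;
}

Because the pool is per-thread after this rework, the parsed status can be released with dm_pool_free() without holding the daemon-wide lvm2 lock; only the lvscan/lvconvert commands still run under dmeventd_lvm2_run_with_lock().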

WHATS_NEW_DM

@@ -1,5 +1,6 @@
 Version 1.02.110 -
 ======================================
+Use libdm status parsing and local mem raid dmeventd plugin.
 Use local mem pool and lock only lvm2 execution for mirror dmeventd plugin.
 Lock protect only lvm2 execution for snapshot and thin dmeventd plugin.
 Use local mempool for raid and mirror plugins.

daemons/dmeventd/plugins/raid/dmeventd_raid.c

@@ -27,112 +27,54 @@ DM_EVENT_LOG_FN("raid")

 /* FIXME Reformat to 80 char lines. */

-/*
- * run_repair is a close copy to
- * plugins/mirror/dmeventd_mirror.c:_remove_failed_devices()
- */
-static int run_repair(const char *device)
+static int _process_raid_event(struct dso_state *state, char *params, const char *device)
 {
-	int r;
-#define CMD_SIZE 256	/* FIXME Use system restriction */
-	char cmd_str[CMD_SIZE];
+	struct dm_status_raid *status;
+	const char *d;

-	if (!dmeventd_lvm2_command(dmeventd_lvm2_pool(), cmd_str, sizeof(cmd_str),
-				   "lvscan --cache", device))
-		return -1;
-
-	r = dmeventd_lvm2_run(cmd_str);
-
-	if (!r)
-		log_info("Re-scan of RAID device %s failed.", device);
-
-	if (!dmeventd_lvm2_command(dmeventd_lvm2_pool(), cmd_str, sizeof(cmd_str),
-				   "lvconvert --config devices{ignore_suspended_devices=1} "
-				   "--repair --use-policies", device))
-		return -1;
-
-	/* if repair goes OK, report success even if lvscan has failed */
-	r = dmeventd_lvm2_run(cmd_str);
-
-	if (!r)
-		log_info("Repair of RAID device %s failed.", device);
-
-	return (r) ? 0 : -1;
-}
-
-static int _process_raid_event(char *params, const char *device)
-{
-	int i, n, failure = 0;
-	char *p, *a[4];
-	char *raid_type;
-	char *num_devices;
-	char *health_chars;
-	char *resync_ratio;
-
-	/*
-	 * RAID parms:     <raid_type> <#raid_disks> \
-	 *                 <health chars> <resync ratio>
-	 */
-	if (!dm_split_words(params, 4, 0, a)) {
+	if (!dm_get_status_raid(state->mem, params, &status)) {
 		log_error("Failed to process status line for %s.", device);
-		return -EINVAL;
+		return 0;
 	}
-	raid_type = a[0];
-	num_devices = a[1];
-	health_chars = a[2];
-	resync_ratio = a[3];
-
-	if (!(n = atoi(num_devices))) {
-		log_error("Failed to parse number of devices for %s: %s.",
-			  device, num_devices);
-		return -EINVAL;
-	}

-	for (i = 0; i < n; i++) {
-		switch (health_chars[i]) {
-		case 'A':
-			/* Device is 'A'live and well */
-		case 'a':
-			/* Device is 'a'live, but not yet in-sync */
-			break;
-		case 'D':
-			log_error("Device #%d of %s array, %s, has failed.",
-				  i, raid_type, device);
-			failure++;
-			break;
-		default:
-			/* Unhandled character returned from kernel */
-			break;
-		}
-		if (failure)
-			return run_repair(device);
+	if ((d = strchr(status->dev_health, 'D')) && !state->failed) {
+		log_error("Device #%d of %s array, %s, has failed.",
+			  (int)(d - status->dev_health),
+			  status->raid_type, device);
+
+		state->failed = 1;
+		if (!dmeventd_lvm2_run_with_lock(state->cmd_lvscan))
+			log_info("Re-scan of RAID device %s failed.", device);
+
+		/* if repair goes OK, report success even if lvscan has failed */
+		if (!dmeventd_lvm2_run_with_lock(state->cmd_lvconvert)) {
+			log_info("Repair of RAID device %s failed.", device);
+			dm_pool_free(state->mem, status);
+			return 0;
+		}
+	} else {
+		state->failed = 0;
+		log_info("%s array, %s, is %s in-sync.",
+			 status->raid_type, device,
+			 (status->insync_regions == status->total_regions) ? "now" : "not");
 	}

-	p = strstr(resync_ratio, "/");
-	if (!p) {
-		log_error("Failed to parse resync_ratio for %s: %s.",
-			  device, resync_ratio);
-		return -EINVAL;
-	}
-	p[0] = '\0';
-	log_info("%s array, %s, is %s in-sync.",
-		 raid_type, device, strcmp(resync_ratio, p+1) ? "not" : "now");
+	dm_pool_free(state->mem, status);

-	return 0;
+	return 1;
 }

 void process_event(struct dm_task *dmt,
 		   enum dm_event_mask event __attribute__((unused)),
 		   void **user)
 {
+	struct dso_state *state = *user;
 	void *next = NULL;
 	uint64_t start, length;
 	char *target_type = NULL;
 	char *params;
 	const char *device = dm_task_get_name(dmt);

-	dmeventd_lvm2_lock();
-
 	do {
 		next = dm_get_next_target(dmt, next, &start, &length,
 					  &target_type, &params);
@@ -147,12 +89,10 @@ void process_event(struct dm_task *dmt,
 			continue;
 		}

-		if (_process_raid_event(params, device))
+		if (!_process_raid_event(state, params, device))
 			log_error("Failed to process event for %s.",
 				  device);
 	} while (next);
-
-	dmeventd_lvm2_unlock();
 }

 int register_device(const char *device,