1
0
mirror of git://sourceware.org/git/lvm2.git synced 2024-12-21 13:34:40 +03:00

cleanup: typos in comments

Collection of typos in code comments.
Should have no runtime effect.
This commit is contained in:
Zdenek Kabelac 2024-08-29 23:05:41 +02:00
parent 7da47cea35
commit 39b7d1ba8f
137 changed files with 384 additions and 384 deletions

View File

@ -30,10 +30,10 @@ struct dm_hash_table {
unsigned num_nodes; unsigned num_nodes;
unsigned num_hint; unsigned num_hint;
unsigned mask_slots; /* (slots - 1) -> used as hash mask */ unsigned mask_slots; /* (slots - 1) -> used as hash mask */
unsigned collisions; /* Collissions of hash keys */ unsigned collisions; /* Collisions of hash keys */
unsigned search; /* How many keys were searched */ unsigned search; /* How many keys were searched */
unsigned found; /* How many nodes were found */ unsigned found; /* How many nodes were found */
unsigned same_hash; /* Was there a colision with same masked hash and len ? */ unsigned same_hash; /* Was there a collision with same masked hash and len ? */
struct dm_hash_node **slots; struct dm_hash_node **slots;
}; };
@ -348,7 +348,7 @@ int dm_hash_insert_allow_multiple(struct dm_hash_table *t, const char *key,
/* /*
* Look through multiple entries with the same key for one that has a * Look through multiple entries with the same key for one that has a
* matching val and return that. If none have maching val, return NULL. * matching val and return that. If none have matching val, return NULL.
*/ */
void *dm_hash_lookup_with_val(struct dm_hash_table *t, const char *key, void *dm_hash_lookup_with_val(struct dm_hash_table *t, const char *key,
const void *val, uint32_t val_len) const void *val, uint32_t val_len)

View File

@ -21,7 +21,7 @@
* compile (using outdir 'cov'): * compile (using outdir 'cov'):
* cov-build --dir=cov make CC=gcc * cov-build --dir=cov make CC=gcc
* *
* analyze (agressively, using 'cov') * analyze (aggressively, using 'cov')
* cov-analyze --dir cov --wait-for-license --hfa --concurrency --enable-fnptr --enable-constraint-fpp --security --all --aggressiveness-level=high --field-offset-escape --user-model-file=coverity/coverity_model.xml * cov-analyze --dir cov --wait-for-license --hfa --concurrency --enable-fnptr --enable-constraint-fpp --security --all --aggressiveness-level=high --field-offset-escape --user-model-file=coverity/coverity_model.xml
* *
* generate html output (to 'html' from 'cov'): * generate html output (to 'html' from 'cov'):
@ -70,7 +70,7 @@ void *memccpy(void *dest, const void *src, int c, size_t n)
} }
/* /*
* 2 lines bellow needs to be placed in coverity/config/user_nodefs.h * 2 lines below needs to be placed in coverity/config/user_nodefs.h
* Not sure about any other way. * Not sure about any other way.
* Without them, coverity shows warning since x86 system header files * Without them, coverity shows warning since x86 system header files
* are using inline assembly to reset fdset * are using inline assembly to reset fdset

View File

@ -279,7 +279,7 @@ static int handle_cluster_request(struct clog_cpg *entry __attribute__((unused))
* With resumes, we only handle our own. * With resumes, we only handle our own.
* Resume is a special case that requires * Resume is a special case that requires
* local action (to set up CPG), followed by * local action (to set up CPG), followed by
* a cluster action to co-ordinate reading * a cluster action to coordinate reading
* the disk and checkpointing * the disk and checkpointing
*/ */
if (tmp->u_rq.request_type == DM_ULOG_RESUME) { if (tmp->u_rq.request_type == DM_ULOG_RESUME) {

View File

@ -254,7 +254,7 @@ static int read_log(struct log_c *lc)
bitset_size = lc->region_count / 8; bitset_size = lc->region_count / 8;
bitset_size += (lc->region_count % 8) ? 1 : 0; bitset_size += (lc->region_count % 8) ? 1 : 0;
/* 'lc->clean_bits + 1' becasue dm_bitset_t leads with a uint32_t */ /* 'lc->clean_bits + 1' because dm_bitset_t leads with a uint32_t */
memcpy(lc->clean_bits + 1, (char *)lc->disk_buffer + 1024, bitset_size); memcpy(lc->clean_bits + 1, (char *)lc->disk_buffer + 1024, bitset_size);
return 0; return 0;
@ -281,7 +281,7 @@ static int write_log(struct log_c *lc)
bitset_size = lc->region_count / 8; bitset_size = lc->region_count / 8;
bitset_size += (lc->region_count % 8) ? 1 : 0; bitset_size += (lc->region_count % 8) ? 1 : 0;
/* 'lc->clean_bits + 1' becasue dm_bitset_t leads with a uint32_t */ /* 'lc->clean_bits + 1' because dm_bitset_t leads with a uint32_t */
memcpy((char *)lc->disk_buffer + 1024, lc->clean_bits + 1, bitset_size); memcpy((char *)lc->disk_buffer + 1024, lc->clean_bits + 1, bitset_size);
if (rw_log(lc, 1)) { if (rw_log(lc, 1)) {
@ -927,7 +927,7 @@ int local_resume(struct dm_ulog_request *rq)
* *
* Since this value doesn't change, the kernel * Since this value doesn't change, the kernel
* should not need to talk to server to get this * should not need to talk to server to get this
* The function is here for completness * The function is here for completeness
* *
* Returns: 0 on success, -EXXX on failure * Returns: 0 on success, -EXXX on failure
*/ */
@ -1018,7 +1018,7 @@ static int clog_in_sync(struct dm_ulog_request *rq)
* happen for reads is that additional read attempts may be * happen for reads is that additional read attempts may be
* taken. * taken.
* *
* Futher investigation may be required to determine if there are * Further investigation may be required to determine if there are
* similar possible outcomes when the mirror is in the process of * similar possible outcomes when the mirror is in the process of
* recovering. In that case, lc->in_sync would not have been set * recovering. In that case, lc->in_sync would not have been set
* yet. * yet.

View File

@ -236,7 +236,7 @@ struct thread_status {
int status; /* See DM_THREAD_{REGISTERING,RUNNING,DONE} */ int status; /* See DM_THREAD_{REGISTERING,RUNNING,DONE} */
int events; /* bitfield for event filter. */ int events; /* bitfield for event filter. */
int current_events; /* bitfield for occured events. */ int current_events; /* bitfield for occurred events. */
struct dm_task *wait_task; struct dm_task *wait_task;
int pending; /* Set when event filter change is pending */ int pending; /* Set when event filter change is pending */
time_t next_time; time_t next_time;
@ -427,7 +427,7 @@ static struct thread_status *_alloc_thread_status(const struct message_data *dat
if (!(thread->device.name = strdup(data->device_uuid))) if (!(thread->device.name = strdup(data->device_uuid)))
goto_out; goto_out;
/* runs ioctl and may register lvm2 pluging */ /* runs ioctl and may register lvm2 plugin */
thread->processing = 1; thread->processing = 1;
thread->status = DM_THREAD_REGISTERING; thread->status = DM_THREAD_REGISTERING;
@ -520,7 +520,7 @@ static int _fetch_string(char **ptr, char **src, const int delimiter)
*p = delimiter; *p = delimiter;
*src = p; *src = p;
} }
(*src)++; /* Skip delmiter, next field */ (*src)++; /* Skip delimiter, next field */
} else if ((len = strlen(*src))) { } else if ((len = strlen(*src))) {
/* No delimiter, item ends with '\0' */ /* No delimiter, item ends with '\0' */
if (!(*ptr = strdup(*src))) { if (!(*ptr = strdup(*src))) {

View File

@ -404,7 +404,7 @@ int daemon_talk(struct dm_event_fifos *fifos,
/* /*
* Check for usable client fifo file * Check for usable client fifo file
* *
* Returns: 2 cliant path does not exists, dmeventd should be restarted * Returns: 2 client path does not exists, dmeventd should be restarted
* 1 on success, 0 otherwise * 1 on success, 0 otherwise
*/ */
static int _check_for_usable_fifos(char *dmeventd_path, struct dm_event_fifos *fifos) static int _check_for_usable_fifos(char *dmeventd_path, struct dm_event_fifos *fifos)
@ -1007,7 +1007,7 @@ void dm_event_log(const char *subsys, int level, const char *file,
static char *_skip_string(char *src, const int delimiter) static char *_skip_string(char *src, const int delimiter)
{ {
src = srtchr(src, delimiter); src = strchr(src, delimiter);
if (src && *(src + 1)) if (src && *(src + 1))
return src + 1; return src + 1;
return NULL; return NULL;

View File

@ -41,7 +41,7 @@ enum dm_event_mask {
DM_EVENT_STATUS_MASK = 0xFF0000, DM_EVENT_STATUS_MASK = 0xFF0000,
DM_EVENT_SYNC_STATUS = 0x010000, /* Mirror synchronization completed/failed. */ DM_EVENT_SYNC_STATUS = 0x010000, /* Mirror synchronization completed/failed. */
DM_EVENT_TIMEOUT = 0x020000, /* Timeout has occured */ DM_EVENT_TIMEOUT = 0x020000, /* Timeout has occurred */
DM_EVENT_REGISTRATION_PENDING = 0x1000000, /* Monitor thread is setting-up/shutting-down */ DM_EVENT_REGISTRATION_PENDING = 0x1000000, /* Monitor thread is setting-up/shutting-down */
}; };
@ -109,7 +109,7 @@ int dm_event_unregister_handler(const struct dm_event_handler *dmevh);
/* Set debug level for logging, and whether to log on stdout/stderr or syslog */ /* Set debug level for logging, and whether to log on stdout/stderr or syslog */
void dm_event_log_set(int debug_log_level, int use_syslog); void dm_event_log_set(int debug_log_level, int use_syslog);
/* Log messages acroding to current debug level */ /* Log messages according to current debug level */
__attribute__((format(printf, 6, 0))) __attribute__((format(printf, 6, 0)))
void dm_event_log(const char *subsys, int level, const char *file, void dm_event_log(const char *subsys, int level, const char *file,
int line, int dm_errno_or_class, int line, int dm_errno_or_class,

View File

@ -38,7 +38,7 @@ static void _process_status_code(dm_status_mirror_health_t health,
* A => Alive - No failures * A => Alive - No failures
* D => Dead - A write failure occurred leaving mirror out-of-sync * D => Dead - A write failure occurred leaving mirror out-of-sync
* F => Flush failed. * F => Flush failed.
* S => Sync - A sychronization failure occurred, mirror out-of-sync * S => Sync - A synchronization failure occurred, mirror out-of-sync
* R => Read - A read failure occurred, mirror data unaffected * R => Read - A read failure occurred, mirror data unaffected
* U => Unclassified failure (bug) * U => Unclassified failure (bug)
*/ */

View File

@ -17,7 +17,7 @@
#include "daemons/dmeventd/libdevmapper-event.h" #include "daemons/dmeventd/libdevmapper-event.h"
#include "lib/config/defaults.h" #include "lib/config/defaults.h"
/* Hold enough elements for the mximum number of RAID images */ /* Hold enough elements for the maximum number of RAID images */
#define RAID_DEVS_ELEMS ((DEFAULT_RAID_MAX_IMAGES + 63) / 64) #define RAID_DEVS_ELEMS ((DEFAULT_RAID_MAX_IMAGES + 63) / 64)
struct dso_state { struct dso_state {

View File

@ -87,7 +87,7 @@ static int _run_command(struct dso_state *state)
log_verbose("Executing command: %s", state->cmd_str); log_verbose("Executing command: %s", state->cmd_str);
/* TODO: /* TODO:
* Support parallel run of 'task' and it's waitpid maintainence * Support parallel run of 'task' and it's waitpid maintenance
* ATM we can't handle signaling of SIGALRM * ATM we can't handle signaling of SIGALRM
* as signalling is not allowed while 'process_event()' is running * as signalling is not allowed while 'process_event()' is running
*/ */
@ -245,7 +245,7 @@ void process_event(struct dm_task *dmt,
/* /*
* Trigger action when threshold boundary is exceeded. * Trigger action when threshold boundary is exceeded.
* Report 80% threshold warning when it's used above 80%. * Report 80% threshold warning when it's used above 80%.
* Only 100% is exception as it cannot be surpased so policy * Only 100% is exception as it cannot be surpassed so policy
* action is called for: >50%, >55% ... >95%, 100% * action is called for: >50%, >55% ... >95%, 100%
*/ */
state->metadata_percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks); state->metadata_percent = dm_make_percent(tps->used_metadata_blocks, tps->total_metadata_blocks);
@ -379,7 +379,7 @@ int register_device(const char *device,
state->argv[1] = str + 1; /* 1 argument - vg/lv */ state->argv[1] = str + 1; /* 1 argument - vg/lv */
_init_thread_signals(state); _init_thread_signals(state);
} else /* Unuspported command format */ } else /* Unsupported command format */
goto inval; goto inval;
state->pid = -1; state->pid = -1;

View File

@ -19,7 +19,7 @@
/* /*
* Use parser from new device_mapper library. * Use parser from new device_mapper library.
* Although during compilation we can see dm_vdo_status_parse() * Although during compilation we can see dm_vdo_status_parse()
* in runtime we are linked agains systems libdm 'older' library * in runtime we are linked against systems libdm 'older' library
* which does not provide this symbol and plugin fails to load * which does not provide this symbol and plugin fails to load
*/ */
/* coverity[unnecessary_header] used for parsing */ /* coverity[unnecessary_header] used for parsing */
@ -78,7 +78,7 @@ static int _run_command(struct dso_state *state)
log_verbose("Executing command: %s", state->cmd_str); log_verbose("Executing command: %s", state->cmd_str);
/* TODO: /* TODO:
* Support parallel run of 'task' and it's waitpid maintainence * Support parallel run of 'task' and it's waitpid maintenance
* ATM we can't handle signaling of SIGALRM * ATM we can't handle signaling of SIGALRM
* as signalling is not allowed while 'process_event()' is running * as signalling is not allowed while 'process_event()' is running
*/ */
@ -227,7 +227,7 @@ void process_event(struct dm_task *dmt,
/* /*
* Trigger action when threshold boundary is exceeded. * Trigger action when threshold boundary is exceeded.
* Report 80% threshold warning when it's used above 80%. * Report 80% threshold warning when it's used above 80%.
* Only 100% is exception as it cannot be surpased so policy * Only 100% is exception as it cannot be surpassed so policy
* action is called for: >50%, >55% ... >95%, 100% * action is called for: >50%, >55% ... >95%, 100%
*/ */
if ((state->percent > WARNING_THRESH) && if ((state->percent > WARNING_THRESH) &&
@ -354,7 +354,7 @@ int register_device(const char *device,
_init_thread_signals(state); _init_thread_signals(state);
} else if (cmd[0] == 0) { } else if (cmd[0] == 0) {
state->name = "volume"; /* What to use with 'others?' */ state->name = "volume"; /* What to use with 'others?' */
} else/* Unuspported command format */ } else/* Unsupported command format */
goto inval; goto inval;
state->pid = -1; state->pid = -1;

View File

@ -67,7 +67,7 @@ def lvs_state_retrieve(selection, cache_refresh=True):
try: try:
# When building up the model, it's best to process LVs with the least # When building up the model, it's best to process LVs with the least
# dependencies to those that are dependant upon other LVs. Otherwise, when # dependencies to those that are dependent upon other LVs. Otherwise, when
# we are trying to gather information we could be in a position where we # we are trying to gather information we could be in a position where we
# don't have information available yet. # don't have information available yet.
lvs = sorted(cfg.db.fetch_lvs(selection), key=get_key) lvs = sorted(cfg.db.fetch_lvs(selection), key=get_key)

View File

@ -351,7 +351,7 @@ class DataStore(object):
else: else:
rc = [] rc = []
for s in pv_name: for s in pv_name:
# Ths user could be using a symlink instead of the actual # The user could be using a symlink instead of the actual
# block device, make sure we are using actual block device file # block device, make sure we are using actual block device file
# if the pv name isn't in the lookup # if the pv name isn't in the lookup
if s not in self.pv_path_to_uuid: if s not in self.pv_path_to_uuid:

View File

@ -160,7 +160,7 @@ class ObjectManager(AutomatedProperties):
# (path, dbus_object.lvm_id)) # (path, dbus_object.lvm_id))
# We want fast access to the object by a number of different ways, # We want fast access to the object by a number of different ways,
# so we use multiple hashs with different keys # so we use multiple hashes with different keys
self._lookup_add(dbus_object, path, dbus_object.lvm_id, self._lookup_add(dbus_object, path, dbus_object.lvm_id,
dbus_object.Uuid) dbus_object.Uuid)

View File

@ -78,7 +78,7 @@ static int str_to_mode(const char *str);
* . Other misc actions are are passed to the worker_thread: * . Other misc actions are are passed to the worker_thread:
* add_work_action(act). * add_work_action(act).
* *
* Onec the client_thread has passed the action off to another * Once the client_thread has passed the action off to another
* thread to process, it goes back to waiting for more client * thread to process, it goes back to waiting for more client
* handling work to do. * handling work to do.
* *
@ -1413,7 +1413,7 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
* It can be lost during dlm recovery when the master node * It can be lost during dlm recovery when the master node
* is removed. * is removed.
* *
* If we're the next to write the lvb, reinitialze it to the * If we're the next to write the lvb, reinitialize it to the
* new VG seqno, or a new GL counter larger than was seen by * new VG seqno, or a new GL counter larger than was seen by
* any hosts before (how to estimate that?) * any hosts before (how to estimate that?)
* *
@ -2798,7 +2798,7 @@ out_rem:
* operating under the assumption that they hold the lock. * operating under the assumption that they hold the lock.
* drop_vg drops all existing locks, but should only * drop_vg drops all existing locks, but should only
* happen when the VG access has been forcibly and * happen when the VG access has been forcibly and
* succesfully terminated. * successfully terminated.
* *
* For vgremove of a sanlock vg, the vg lock will be held, * For vgremove of a sanlock vg, the vg lock will be held,
* and possibly the gl lock if this vg holds the gl. * and possibly the gl lock if this vg holds the gl.
@ -3042,7 +3042,7 @@ static int add_lockspace_thread(const char *ls_name,
ls->start_client_id = act->client_id; ls->start_client_id = act->client_id;
/* /*
* Copy PV list to lockspact structure, so this is * Copy PV list to lockspace structure, so this is
* used for VG locking for idm scheme. * used for VG locking for idm scheme.
*/ */
if (lm_type == LD_LM_IDM && if (lm_type == LD_LM_IDM &&
@ -3287,7 +3287,7 @@ static int add_lockspace(struct action *act)
/* /*
* vgchange --lock-stop vgname will lock the vg ex, then send a stop, * vgchange --lock-stop vgname will lock the vg ex, then send a stop,
* so we exect to find the ex vg lock held here, and will automatically * so we expect to find the ex vg lock held here, and will automatically
* unlock it when stopping. * unlock it when stopping.
* *
* Should we attempt to stop the lockspace containing the gl last? * Should we attempt to stop the lockspace containing the gl last?
@ -4026,7 +4026,7 @@ static int client_send_result(struct client *cl, struct action *act)
* The lockspace could not be found, in which case * The lockspace could not be found, in which case
* the caller may want to know if any lockspaces exist * the caller may want to know if any lockspaces exist
* or if lockspaces exist, but not one with the global lock. * or if lockspaces exist, but not one with the global lock.
* Given this detail, it may be able to procede without * Given this detail, it may be able to proceed without
* the lock. * the lock.
*/ */
pthread_mutex_lock(&lockspaces_mutex); pthread_mutex_lock(&lockspaces_mutex);

View File

@ -364,7 +364,7 @@ static void lm_idm_update_vb_timestamp(uint64_t *vb_timestamp)
/* /*
* It's possible that the multiple nodes have no clock * It's possible that the multiple nodes have no clock
* synchronization with microsecond prcision and the time * synchronization with microsecond precision and the time
* is going backward. For this case, simply increment the * is going backward. For this case, simply increment the
* existing timestamp and write out to drive. * existing timestamp and write out to drive.
*/ */

View File

@ -106,7 +106,7 @@ which:
1. Uses syslog to explain what is happening. 1. Uses syslog to explain what is happening.
2. Notifies lvmlockd that the VG is being killed, so lvmlockd can 2. Notifies lvmlockd that the VG is being killed, so lvmlockd can
immediatley return an error for this condition if any new lock immediately return an error for this condition if any new lock
requests are made. (This step would not be strictly necessary.) requests are made. (This step would not be strictly necessary.)
3. Attempts to quit using the VG. This is not yet implemented, but 3. Attempts to quit using the VG. This is not yet implemented, but
@ -136,7 +136,7 @@ release all the leases for the VG.
* from each pid: signals due to a sanlock_request, and * from each pid: signals due to a sanlock_request, and
* acquire/release/convert/inquire. The later can probably be * acquire/release/convert/inquire. The later can probably be
* addressed with a flag to indicate that the pid field should be * addressed with a flag to indicate that the pid field should be
* interpretted as 'ci' (which the caller would need to figure * interpreted as 'ci' (which the caller would need to figure
* out somehow.) * out somehow.)
*/ */

View File

@ -278,7 +278,7 @@ static void _pdlv_locked_dump(struct buffer *buff, const struct lvmpolld_lv *pdl
if (dm_snprintf(tmp, sizeof(tmp), "\t\tinit_requests_count=%d\n", pdlv->init_rq_count) > 0) if (dm_snprintf(tmp, sizeof(tmp), "\t\tinit_requests_count=%d\n", pdlv->init_rq_count) > 0)
buffer_append(buff, tmp); buffer_append(buff, tmp);
/* lvm_commmand-section { */ /* lvm_command-section { */
buffer_append(buff, "\t\tlvm_command {\n"); buffer_append(buff, "\t\tlvm_command {\n");
if (cmd_state->retcode == -1 && !cmd_state->signal) if (cmd_state->retcode == -1 && !cmd_state->signal)
buffer_append(buff, "\t\t\tstate=\"" LVMPD_RESP_IN_PROGRESS "\"\n"); buffer_append(buff, "\t\t\tstate=\"" LVMPD_RESP_IN_PROGRESS "\"\n");
@ -290,7 +290,7 @@ static void _pdlv_locked_dump(struct buffer *buff, const struct lvmpolld_lv *pdl
buffer_append(buff, tmp); buffer_append(buff, tmp);
} }
buffer_append(buff, "\t\t}\n"); buffer_append(buff, "\t\t}\n");
/* } lvm_commmand-section */ /* } lvm_command-section */
buffer_append(buff, "\t}\n"); buffer_append(buff, "\t}\n");
/* } pdlv-section */ /* } pdlv-section */

View File

@ -68,7 +68,7 @@ struct lvmpolld_lv {
struct lvmpolld_cmd_stat cmd_state; struct lvmpolld_cmd_stat cmd_state;
unsigned init_rq_count; /* for debugging purposes only */ unsigned init_rq_count; /* for debugging purposes only */
unsigned polling_finished:1; /* no more updates */ unsigned polling_finished:1; /* no more updates */
unsigned error:1; /* unrecoverable error occured in lvmpolld */ unsigned error:1; /* unrecoverable error occurred in lvmpolld */
}; };
typedef void (*lvmpolld_parse_output_fn_t) (struct lvmpolld_lv *pdlv, const char *line); typedef void (*lvmpolld_parse_output_fn_t) (struct lvmpolld_lv *pdlv, const char *line);

View File

@ -45,7 +45,7 @@
#define LVMPD_RESP_OK "OK" #define LVMPD_RESP_OK "OK"
#define LVMPD_REAS_RETCODE "retcode" /* lvm cmd ret code */ #define LVMPD_REAS_RETCODE "retcode" /* lvm cmd ret code */
#define LVMPD_REAS_SIGNAL "signal" /* lvm cmd terminating singal */ #define LVMPD_REAS_SIGNAL "signal" /* lvm cmd terminating signal */
#define LVMPD_RET_DUP_FAILED 100 #define LVMPD_RET_DUP_FAILED 100
#define LVMPD_RET_EXC_FAILED 101 #define LVMPD_RET_EXC_FAILED 101

View File

@ -304,15 +304,15 @@ int dm_task_add_target(struct dm_task *dmt,
#define DM_FORMAT_DEV_BUFSIZE 13 /* Minimum bufsize to handle worst case. */ #define DM_FORMAT_DEV_BUFSIZE 13 /* Minimum bufsize to handle worst case. */
int dm_format_dev(char *buf, int bufsize, uint32_t dev_major, uint32_t dev_minor); int dm_format_dev(char *buf, int bufsize, uint32_t dev_major, uint32_t dev_minor);
/* Use this to retrive target information returned from a STATUS call */ /* Use this to retrieve target information returned from a STATUS call */
void *dm_get_next_target(struct dm_task *dmt, void *dm_get_next_target(struct dm_task *dmt,
void *next, uint64_t *start, uint64_t *length, void *next, uint64_t *start, uint64_t *length,
char **target_type, char **params); char **target_type, char **params);
/* /*
* Following dm_get_status_* functions will allocate approriate status structure * Following dm_get_status_* functions will allocate appropriate status structure
* from passed mempool together with the necessary character arrays. * from passed mempool together with the necessary character arrays.
* Destroying the mempool will release all asociated allocation. * Destroying the mempool will release all associated allocation.
*/ */
/* Parse params from STATUS call for mirror target */ /* Parse params from STATUS call for mirror target */
@ -541,7 +541,7 @@ const char *dm_sysfs_dir(void);
/* /*
* Configure default UUID prefix string. * Configure default UUID prefix string.
* Conventionally this is a short capitalised prefix indicating the subsystem * Conventionally this is a short capitalized prefix indicating the subsystem
* that is managing the devices, e.g. "LVM-" or "MPATH-". * that is managing the devices, e.g. "LVM-" or "MPATH-".
* To support stacks of devices from different subsystems, recursive functions * To support stacks of devices from different subsystems, recursive functions
* stop recursing if they reach a device with a different prefix. * stop recursing if they reach a device with a different prefix.
@ -584,7 +584,7 @@ int dm_device_has_mounted_fs(uint32_t major, uint32_t minor);
/* /*
* Callback is invoked for individal mountinfo lines, * Callback is invoked for individual mountinfo lines,
* minor, major and mount target are parsed and unmangled. * minor, major and mount target are parsed and unmangled.
*/ */
typedef int (*dm_mountinfo_line_callback_fn) (char *line, unsigned maj, unsigned min, typedef int (*dm_mountinfo_line_callback_fn) (char *line, unsigned maj, unsigned min,
@ -698,7 +698,7 @@ void *dm_tree_node_get_context(const struct dm_tree_node *node);
/* /*
* Returns 0 when node size and its children is unchanged. * Returns 0 when node size and its children is unchanged.
* Returns 1 when node or any of its children has increased size. * Returns 1 when node or any of its children has increased size.
* Rerurns -1 when node or any of its children has reduced size. * Returns -1 when node or any of its children has reduced size.
*/ */
int dm_tree_node_size_changed(const struct dm_tree_node *dnode); int dm_tree_node_size_changed(const struct dm_tree_node *dnode);
@ -885,7 +885,7 @@ struct dm_tree_node_raid_params {
}; };
/* /*
* Version 2 of above node raid params struct to keeep API compatibility. * Version 2 of above node raid params struct to keep API compatibility.
* *
* Extended for more than 64 legs (max 253 in the MD kernel runtime!), * Extended for more than 64 legs (max 253 in the MD kernel runtime!),
* delta_disks for disk add/remove reshaping, * delta_disks for disk add/remove reshaping,
@ -908,7 +908,7 @@ struct dm_tree_node_raid_params_v2 {
* 'rebuilds' and 'writemostly' are bitfields that signify * 'rebuilds' and 'writemostly' are bitfields that signify
* which devices in the array are to be rebuilt or marked * which devices in the array are to be rebuilt or marked
* writemostly. The kernel supports up to 253 legs. * writemostly. The kernel supports up to 253 legs.
* We limit ourselvs by choosing a lower value * We limit ourselves by choosing a lower value
* for DEFAULT_RAID_MAX_IMAGES. * for DEFAULT_RAID_MAX_IMAGES.
*/ */
uint64_t rebuilds[RAID_BITMAP_SIZE]; uint64_t rebuilds[RAID_BITMAP_SIZE];
@ -945,7 +945,7 @@ struct dm_config_node;
* *
* policy_settings { * policy_settings {
* migration_threshold=2048 * migration_threshold=2048
* sequention_threashold=100 * sequential_threshold=100
* ... * ...
* } * }
* *
@ -1094,7 +1094,7 @@ int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
/* End of Replicator API */ /* End of Replicator API */
/* /*
* FIXME: Defines bellow are based on kernel's dm-thin.c defines * FIXME: Defines below are based on kernel's dm-thin.c defines
* DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT) * DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
* DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT) * DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
*/ */
@ -1160,7 +1160,7 @@ int dm_tree_node_set_thin_pool_error_if_no_space(struct dm_tree_node *node,
int dm_tree_node_set_thin_pool_read_only(struct dm_tree_node *node, int dm_tree_node_set_thin_pool_read_only(struct dm_tree_node *node,
unsigned read_only); unsigned read_only);
/* /*
* FIXME: Defines bellow are based on kernel's dm-thin.c defines * FIXME: Defines below are based on kernel's dm-thin.c defines
* MAX_DEV_ID ((1 << 24) - 1) * MAX_DEV_ID ((1 << 24) - 1)
*/ */
#define DM_THIN_MAX_DEVICE_ID (UINT32_C((1 << 24) - 1)) #define DM_THIN_MAX_DEVICE_ID (UINT32_C((1 << 24) - 1))
@ -2058,7 +2058,7 @@ void dm_config_destroy(struct dm_config_tree *cft);
/* Simple output line by line. */ /* Simple output line by line. */
typedef int (*dm_putline_fn)(const char *line, void *baton); typedef int (*dm_putline_fn)(const char *line, void *baton);
/* More advaced output with config node reference. */ /* More advanced output with config node reference. */
typedef int (*dm_config_node_out_fn)(const struct dm_config_node *cn, const char *line, void *baton); typedef int (*dm_config_node_out_fn)(const struct dm_config_node *cn, const char *line, void *baton);
/* /*
@ -2121,7 +2121,7 @@ struct dm_config_node *dm_config_clone_node(struct dm_config_tree *cft, const st
* Common formatting flags applicable to all config node types (lower 16 bits). * Common formatting flags applicable to all config node types (lower 16 bits).
*/ */
#define DM_CONFIG_VALUE_FMT_COMMON_ARRAY 0x00000001 /* value is array */ #define DM_CONFIG_VALUE_FMT_COMMON_ARRAY 0x00000001 /* value is array */
#define DM_CONFIG_VALUE_FMT_COMMON_EXTRA_SPACES 0x00000002 /* add spaces in "key = value" pairs in constrast to "key=value" for better readability */ #define DM_CONFIG_VALUE_FMT_COMMON_EXTRA_SPACES 0x00000002 /* add spaces in "key = value" pairs in contrast to "key=value" for better readability */
/* /*
* Type-related config node formatting flags (higher 16 bits). * Type-related config node formatting flags (higher 16 bits).

View File

@ -659,7 +659,7 @@ void *dm_get_next_target(struct dm_task *dmt, void *next,
return t->next; return t->next;
} }
/* Unmarshall the target info returned from a status call */ /* Unmarshal the target info returned from a status call */
static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi) static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi)
{ {
char *outbuf = (char *) dmi + dmi->data_start; char *outbuf = (char *) dmi + dmi->data_start;

View File

@ -1796,7 +1796,7 @@ static int _mountinfo_parse_line(const char *line, unsigned *maj, unsigned *min,
} }
/* /*
* Function to operate on individal mountinfo line, * Function to operate on individual mountinfo line,
* minor, major and mount target are parsed and unmangled * minor, major and mount target are parsed and unmangled
*/ */
int dm_mountinfo_read(dm_mountinfo_line_callback_fn read_fn, void *cb_data) int dm_mountinfo_read(dm_mountinfo_line_callback_fn read_fn, void *cb_data)

View File

@ -265,7 +265,7 @@ struct load_properties {
/* /*
* Preload tree normally only loads and not resume, but there is * Preload tree normally only loads and not resume, but there is
* automatic resume when target is extended, as it's believed * automatic resume when target is extended, as it's believed
* there can be no i/o flying to this 'new' extedend space * there can be no i/o flying to this 'new' extended space
* from any device above. Reason is that preloaded target above * from any device above. Reason is that preloaded target above
* may actually need to see its bigger subdevice before it * may actually need to see its bigger subdevice before it
* gets suspended. As long as devices are simple linears * gets suspended. As long as devices are simple linears
@ -277,7 +277,7 @@ struct load_properties {
/* /*
* When comparing table lines to decide if a reload is * When comparing table lines to decide if a reload is
* needed, ignore any differences betwen the lvm device * needed, ignore any differences between the lvm device
* params and the kernel-reported device params. * params and the kernel-reported device params.
* dm-integrity reports many internal parameters on the * dm-integrity reports many internal parameters on the
* table line when lvm does not explicitly set them, * table line when lvm does not explicitly set them,
@ -288,8 +288,8 @@ struct load_properties {
/* /*
* Call node_send_messages(), set to 2 if there are messages * Call node_send_messages(), set to 2 if there are messages
* When != 0, it validates matching transaction id, thus thin-pools * When != 0, it validates matching transaction id, thus thin-pools
* where transation_id is passed as 0 are never validated, this * where transaction_id is passed as 0 are never validated, this
* allows external managment of thin-pool TID. * allows external management of thin-pool TID.
*/ */
unsigned send_messages; unsigned send_messages;
/* Skip suspending node's children, used when sending messages to thin-pool */ /* Skip suspending node's children, used when sending messages to thin-pool */
@ -1816,7 +1816,7 @@ static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
if (info.open_count) { if (info.open_count) {
/* Skip internal non-toplevel opened nodes */ /* Skip internal non-toplevel opened nodes */
/* On some old udev systems without corrrect udev rules /* On some old udev systems without correct udev rules
* this hack avoids 'leaking' active _mimageX legs after * this hack avoids 'leaking' active _mimageX legs after
* deactivation of mirror LV. Other suffixes are not added * deactivation of mirror LV. Other suffixes are not added
* since it's expected newer systems with wider range of * since it's expected newer systems with wider range of
@ -2182,7 +2182,7 @@ int dm_tree_activate_children(struct dm_tree_node *dnode,
/* /*
* FIXME: Implement delayed error reporting * FIXME: Implement delayed error reporting
* activation should be stopped only in the case, * activation should be stopped only in the case,
* the submission of transation_id message fails, * the submission of transaction_id message fails,
* resume should continue further, just whole command * resume should continue further, just whole command
* has to report failure. * has to report failure.
*/ */
@ -2274,7 +2274,7 @@ static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *
return 1; return 1;
} }
/* simplify string emiting code */ /* simplify string emitting code */
#define EMIT_PARAMS(p, str...)\ #define EMIT_PARAMS(p, str...)\
do {\ do {\
int w;\ int w;\
@ -2970,7 +2970,7 @@ static int _vdo_emit_segment_line(struct dm_task *dmt, uint32_t major, uint32_t
* If there is already running VDO target, read 'existing' virtual size out of table line * If there is already running VDO target, read 'existing' virtual size out of table line
 * and avoid reading them from VDO metadata device * and avoid reading them from VDO metadata device
* *
* NOTE: ATM VDO virtual size can be ONLY extended thus it's simple to recongnize 'right' size. * NOTE: ATM VDO virtual size can be ONLY extended thus it's simple to recognize 'right' size.
* However if there would be supported also reduction, this check would need to check range. * However if there would be supported also reduction, this check would need to check range.
*/ */
if ((vdo_dmt = dm_task_create(DM_DEVICE_TABLE))) { if ((vdo_dmt = dm_task_create(DM_DEVICE_TABLE))) {
@ -3382,7 +3382,7 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
if (!child->info.exists && !(node_created = _create_node(child, dnode))) if (!child->info.exists && !(node_created = _create_node(child, dnode)))
return_0; return_0;
/* Propagate delayed resume from exteded child node */ /* Propagate delayed resume from extended child node */
if (child->props.delay_resume_if_extended) if (child->props.delay_resume_if_extended)
dnode->props.delay_resume_if_extended = 1; dnode->props.delay_resume_if_extended = 1;
@ -3818,7 +3818,7 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
* - maximum 253 legs in a raid set (MD kernel limitation) * - maximum 253 legs in a raid set (MD kernel limitation)
* - delta_disks for disk add/remove reshaping * - delta_disks for disk add/remove reshaping
* - data_offset for out-of-place reshaping * - data_offset for out-of-place reshaping
* - data_copies to cope witth odd numbers of raid10 disks * - data_copies to cope with odd numbers of raid10 disks
*/ */
int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node, int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node,
uint64_t size, uint64_t size,

View File

@ -459,7 +459,7 @@ static int _report_field_string_list(struct dm_report *rh,
* *
* The very first item in the array of 'struct pos_len' is always * The very first item in the array of 'struct pos_len' is always
* a pair denoting '[list_size,strlen(field->report_string)]'. The * a pair denoting '[list_size,strlen(field->report_string)]'. The
* rest of items denote start and lenght of each item in the list. * rest of items denote start and length of each item in the list.
* *
* *
* For example, if we have a list with "abc", "xy", "defgh" * For example, if we have a list with "abc", "xy", "defgh"
@ -1396,7 +1396,7 @@ struct dm_report *dm_report_init(uint32_t *report_types,
} }
/* /*
* Return updated types value for further compatility check by caller. * Return updated types value for further compatibility check by caller.
*/ */
_dm_report_init_update_types(rh, report_types); _dm_report_init_update_types(rh, report_types);
@ -4025,7 +4025,7 @@ error:
return NULL; return NULL;
} }
/* AND_EXPRESSION := EX (AND_OP AND_EXPRSSION) */ /* AND_EXPRESSION := EX (AND_OP AND_EXPRESSION) */
static struct selection_node *_parse_and_ex(struct dm_report *rh, static struct selection_node *_parse_and_ex(struct dm_report *rh,
const char *s, const char *s,
const char **next, const char **next,

View File

@ -62,7 +62,7 @@
* *
* The UUID contained in the dm_ulog_request structure is the reference that * The UUID contained in the dm_ulog_request structure is the reference that
* will be used by all request types to a specific log. The constructor must * will be used by all request types to a specific log. The constructor must
* record this assotiation with the instance created. * record this association with the instance created.
* *
* When the request has been processed, user-space must return the * When the request has been processed, user-space must return the
* dm_ulog_request to the kernel - setting the 'error' field, filling the * dm_ulog_request to the kernel - setting the 'error' field, filling the

View File

@ -129,7 +129,7 @@ int dm_pool_locked(struct dm_pool *p)
* Bool specifies whether to store the pool crc/hash checksum. * Bool specifies whether to store the pool crc/hash checksum.
* *
* \return * \return
* 1 (success) when the pool was preperly locked, 0 otherwise. * 1 (success) when the pool was properly locked, 0 otherwise.
*/ */
int dm_pool_lock(struct dm_pool *p, int crc) int dm_pool_lock(struct dm_pool *p, int crc)
{ {

View File

@ -136,7 +136,7 @@ struct vdo_volume_geometry_4 {
struct vdo_index_config index_config; struct vdo_index_config index_config;
} __packed; } __packed;
/* Decoding mostly only some used stucture members */ /* Decoding mostly only some used structure members */
static void _vdo_decode_version(struct vdo_version_number *v) static void _vdo_decode_version(struct vdo_version_number *v)
{ {

View File

@ -855,7 +855,7 @@ int lv_info_with_seg_status(struct cmd_context *cmd,
/* Merge not yet started, still a snapshot... */ /* Merge not yet started, still a snapshot... */
} }
/* Hadle fictional lvm2 snapshot and query snapshotX volume */ /* Handle fictional lvm2 snapshot and query snapshotX volume */
lv_seg = find_snapshot(lv); lv_seg = find_snapshot(lv);
} }
@ -936,7 +936,7 @@ int lv_check_not_in_use(const struct logical_volume *lv, int error_if_used)
log_debug_activation("Retrying open_count check for %s.", log_debug_activation("Retrying open_count check for %s.",
display_lvname(lv)); display_lvname(lv));
if (!lv_info(lv->vg->cmd, lv, 0, &info, 1, 0) || !info.exists) { if (!lv_info(lv->vg->cmd, lv, 0, &info, 1, 0) || !info.exists) {
stack; /* device dissappeared? */ stack; /* device disappeared? */
return 1; return 1;
} else if (!info.open_count) } else if (!info.open_count)
return 1; return 1;
@ -1006,7 +1006,7 @@ int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
int r; int r;
struct dev_manager *dm; struct dev_manager *dm;
/* If mirrored LV is temporarily shrinked to 1 area (= linear), /* If mirrored LV is temporarily shrunk to 1 area (= linear),
* it should be considered in-sync. */ * it should be considered in-sync. */
if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) { if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
*percent = DM_PERCENT_100; *percent = DM_PERCENT_100;
@ -2235,7 +2235,7 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
(pvmove_lv = find_pvmove_lv_in_lv(lv_pre))) { (pvmove_lv = find_pvmove_lv_in_lv(lv_pre))) {
/* /*
* When starting PVMOVE, suspend participating LVs first * When starting PVMOVE, suspend participating LVs first
* with committed metadata by looking at precommited pvmove list. * with committed metadata by looking at precommitted pvmove list.
* In committed metadata these LVs are not connected in any way. * In committed metadata these LVs are not connected in any way.
* *
* TODO: prepare list of LVs needed to be suspended and pass them * TODO: prepare list of LVs needed to be suspended and pass them
@ -2264,7 +2264,7 @@ static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
log_error("lv_list alloc failed."); log_error("lv_list alloc failed.");
goto out; goto out;
} }
/* Look for precommitted LV name in commmitted VG */ /* Look for precommitted LV name in committed VG */
if (!(lvl->lv = find_lv(lv->vg, lv_tmp->name))) { if (!(lvl->lv = find_lv(lv->vg, lv_tmp->name))) {
log_error(INTERNAL_ERROR "LV %s missing from preload metadata.", log_error(INTERNAL_ERROR "LV %s missing from preload metadata.",
display_lvname(lv_tmp)); display_lvname(lv_tmp));
@ -2524,7 +2524,7 @@ int lv_deactivate(struct cmd_context *cmd, const char *lvid_s, const struct logi
/* /*
* Remove any transiently activated error * Remove any transiently activated error
* devices which arean't used any more. * devices which aren't used any more.
*/ */
if (r && lv_is_raid(lv) && !lv_deactivate_any_missing_subdevs(lv)) { if (r && lv_is_raid(lv) && !lv_deactivate_any_missing_subdevs(lv)) {
log_error("Failed to remove temporary SubLVs from %s", log_error("Failed to remove temporary SubLVs from %s",
@ -2771,7 +2771,7 @@ static int _lv_remove_any_missing_subdevs(struct logical_volume *lv)
return 1; return 1;
} }
/* Remove any "*-missing_*" sub devices added by the activation layer for an rmate/rimage missing PV mapping */ /* Remove any "*-missing_*" sub devices added by the activation layer for an rmeta/rimage missing PV mapping */
int lv_deactivate_any_missing_subdevs(const struct logical_volume *lv) int lv_deactivate_any_missing_subdevs(const struct logical_volume *lv)
{ {
uint32_t s; uint32_t s;
@ -2849,7 +2849,7 @@ static int _component_cb(struct logical_volume *lv, void *data)
* Finds out for any LV if any of its component LVs are active. * Finds out for any LV if any of its component LVs are active.
* Function first checks if an existing LV is visible and active eventually * Function first checks if an existing LV is visible and active eventually
 * its lock holding LV is already active. In such case sub LV cannot be * its lock holding LV is already active. In such case sub LV cannot be
* actived alone and no further checking is needed. * activated alone and no further checking is needed.
* *
* Returns active component LV if there is such. * Returns active component LV if there is such.
*/ */
@ -2912,7 +2912,7 @@ static int _deactivate_sub_lv_cb(struct logical_volume *lv, void *data)
} }
/* /*
* Deactivates LV toghether with explicit deactivation call made also for all its component LVs. * Deactivates LV together with explicit deactivation call made also for all its component LVs.
*/ */
int deactivate_lv_with_sub_lv(const struct logical_volume *lv) int deactivate_lv_with_sub_lv(const struct logical_volume *lv)
{ {

View File

@ -840,7 +840,7 @@ int dm_device_is_usable(struct cmd_context *cmd, struct device *dev, struct dev_
* *
* This is a quick check for now, but replace it with more * This is a quick check for now, but replace it with more
* robust and better check that would check the stack * robust and better check that would check the stack
* correctly, not just snapshots but any cobimnation possible * correctly, not just snapshots but any combination possible
* in a stack - use proper dm tree to check this instead. * in a stack - use proper dm tree to check this instead.
*/ */
if (check.check_suspended && if (check.check_suspended &&
@ -2340,7 +2340,7 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
* Check for device holders (ATM used only for removed pvmove targets) * Check for device holders (ATM used only for removed pvmove targets)
* and add them into dtree structures. * and add them into dtree structures.
* When 'laopts != NULL' add them as new nodes - which also corrects READ_AHEAD. * When 'laopts != NULL' add them as new nodes - which also corrects READ_AHEAD.
* Note: correct table are already explicitelly PRELOADED. * Note: correct table are already explicitly PRELOADED.
*/ */
static int _check_holder(struct dev_manager *dm, struct dm_tree *dtree, static int _check_holder(struct dev_manager *dm, struct dm_tree *dtree,
const struct logical_volume *lv, const struct logical_volume *lv,
@ -2887,7 +2887,7 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
.send_messages = 1 /* Node with messages */ .send_messages = 1 /* Node with messages */
}; };
/* /*
* Add some messsages if right node exist in the table only * Add some messages if right node exist in the table only
* when building SUSPEND tree for origin-only thin-pool. * when building SUSPEND tree for origin-only thin-pool.
* *
* TODO: Fix call of '_add_target_to_dtree()' to add message * TODO: Fix call of '_add_target_to_dtree()' to add message
@ -3613,7 +3613,7 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
* so just use the tree's existing nodes' info * so just use the tree's existing nodes' info
*/ */
if ((dinfo = _cached_dm_info(dm->mem, dtree, lv, NULL))) { if ((dinfo = _cached_dm_info(dm->mem, dtree, lv, NULL))) {
/* Merging origin LV is present, check if mergins is already running. */ /* Merging origin LV is present, check if merging is already running. */
if ((seg_is_thin_volume(seg) && _lv_has_thin_device_id(dm->mem, lv, NULL, seg->device_id)) || if ((seg_is_thin_volume(seg) && _lv_has_thin_device_id(dm->mem, lv, NULL, seg->device_id)) ||
(!seg_is_thin_volume(seg) && lv_has_target_type(dm->mem, lv, NULL, TARGET_NAME_SNAPSHOT_MERGE))) { (!seg_is_thin_volume(seg) && lv_has_target_type(dm->mem, lv, NULL, TARGET_NAME_SNAPSHOT_MERGE))) {
log_debug_activation("Merging of snapshot volume %s to origin %s is in progress.", log_debug_activation("Merging of snapshot volume %s to origin %s is in progress.",

View File

@ -1462,7 +1462,7 @@ void lvmcache_extra_md_component_checks(struct cmd_context *cmd)
* not careful to do it only when there's a good reason to believe a * not careful to do it only when there's a good reason to believe a
* dev is an md component. * dev is an md component.
* *
* If the pv/dev size mismatches are commonly occuring for * If the pv/dev size mismatches are commonly occurring for
* non-md-components then we'll want to stop using that as a trigger * non-md-components then we'll want to stop using that as a trigger
* for the full md check. * for the full md check.
*/ */
@ -1547,7 +1547,7 @@ void lvmcache_extra_md_component_checks(struct cmd_context *cmd)
* incorrectly placed PVs should have been moved from the orphan vginfo * incorrectly placed PVs should have been moved from the orphan vginfo
* onto their correct vginfo's, and the orphan vginfo should (in theory) * onto their correct vginfo's, and the orphan vginfo should (in theory)
* represent only real orphan PVs. (Note: if lvmcache_label_scan is run * represent only real orphan PVs. (Note: if lvmcache_label_scan is run
* after vg_read udpates to lvmcache state, then the lvmcache will be * after vg_read updates to lvmcache state, then the lvmcache will be
* incorrect again, so do not run lvmcache_label_scan during the * incorrect again, so do not run lvmcache_label_scan during the
* processing phase.) * processing phase.)
* *
@ -2243,7 +2243,7 @@ int lvmcache_update_vgname_and_id(struct cmd_context *cmd, struct lvmcache_info
if (!_lvmcache_update_vgstatus(info, vgsummary->vgstatus, vgsummary->creation_host, if (!_lvmcache_update_vgstatus(info, vgsummary->vgstatus, vgsummary->creation_host,
vgsummary->lock_type, vgsummary->system_id)) { vgsummary->lock_type, vgsummary->system_id)) {
/* /*
* This shouldn't happen, it's an internal errror, and we can leave * This shouldn't happen, it's an internal error, and we can leave
* the info in place without saving the summary values in vginfo. * the info in place without saving the summary values in vginfo.
*/ */
log_error("Failed to update VG %s info in lvmcache.", vgname); log_error("Failed to update VG %s info in lvmcache.", vgname);

View File

@ -82,7 +82,7 @@ static void _cache_display(const struct lv_segment *seg)
/* /*
* When older metadata are loaded without newer settings, * When older metadata are loaded without newer settings,
 * set them to default settings (the one that could have been * set them to default settings (the one that could have been
* used implicitely at that time). * used implicitly at that time).
* *
* Needs both segments cache and cache_pool to be loaded. * Needs both segments cache and cache_pool to be loaded.
*/ */
@ -144,8 +144,8 @@ static int _settings_text_import(struct lv_segment *seg,
* Read in policy args: * Read in policy args:
* policy_settings { * policy_settings {
* migration_threshold=2048 * migration_threshold=2048
* sequention_threashold=100 * sequential_threshold=100
* random_threashold=200 * random_threshold=200
* read_promote_adjustment=10 * read_promote_adjustment=10
* write_promote_adjustment=20 * write_promote_adjustment=20
* discard_promote_adjustment=40 * discard_promote_adjustment=40
@ -706,7 +706,7 @@ static int _cache_add_target_line(struct dev_manager *dm,
} }
}; };
/* Check if cache settings are acceptable to knownm policies */ /* Check if cache settings are acceptable to known policies */
for (i = 0; i < DM_ARRAY_SIZE(_accepted); i++) { for (i = 0; i < DM_ARRAY_SIZE(_accepted); i++) {
if (strcasecmp(cache_pool_seg->policy_name, _accepted[i].name)) if (strcasecmp(cache_pool_seg->policy_name, _accepted[i].name))
continue; continue;

View File

@ -1228,7 +1228,7 @@ static struct dev_filter *_init_filter_chain(struct cmd_context *cmd)
* sysfs filter. Only available on 2.6 kernels. Non-critical. * sysfs filter. Only available on 2.6 kernels. Non-critical.
* Eliminates unavailable devices. * Eliminates unavailable devices.
* TODO: this may be unnecessary now with device ids * TODO: this may be unnecessary now with device ids
* (currently not used for devs match to device id using syfs) * (currently not used for devs match to device id using sysfs)
*/ */
if (find_config_tree_bool(cmd, devices_sysfs_scan_CFG, NULL)) { if (find_config_tree_bool(cmd, devices_sysfs_scan_CFG, NULL)) {
if ((filters[nr_filt] = sysfs_filter_create(dm_sysfs_dir()))) if ((filters[nr_filt] = sysfs_filter_create(dm_sysfs_dir())))
@ -1312,7 +1312,7 @@ int init_filters(struct cmd_context *cmd, unsigned load_persistent_cache)
init_ignore_lvm_mirrors(find_config_tree_bool(cmd, devices_ignore_lvm_mirrors_CFG, NULL)); init_ignore_lvm_mirrors(find_config_tree_bool(cmd, devices_ignore_lvm_mirrors_CFG, NULL));
/* /*
* persisent filter is a cache of the previous result real filter result. * persistent filter is a cache of the previous result real filter result.
* If a dev is found in persistent filter, the pass/fail result saved by * If a dev is found in persistent filter, the pass/fail result saved by
 * the pfilter is used. If a dev does not exist in the persistent * the pfilter is used. If a dev does not exist in the persistent
* filter, the dev is passed on to the real filter, and when the result * filter, the dev is passed on to the real filter, and when the result

View File

@ -507,7 +507,7 @@ int config_file_read_fd(struct dm_config_tree *cft, struct device *dev, dev_io_r
use_plain_read = 0; use_plain_read = 0;
/* Ensure there is extra '\0' after end of buffer since we pass /* Ensure there is extra '\0' after end of buffer since we pass
* buffer to funtions like strtoll() */ * buffer to functions like strtoll() */
if (!(buf = zalloc(size + size2 + 1))) { if (!(buf = zalloc(size + size2 + 1))) {
log_error("Failed to allocate circular buffer."); log_error("Failed to allocate circular buffer.");
return 0; return 0;

View File

@ -112,7 +112,7 @@ typedef union {
* and whether it can be attached to VG/LV metadata at the same time * and whether it can be attached to VG/LV metadata at the same time
* The CFG_PROFILABLE_METADATA flag incorporates CFG_PROFILABLE flag!!! */ * The CFG_PROFILABLE_METADATA flag incorporates CFG_PROFILABLE flag!!! */
#define CFG_PROFILABLE_METADATA 0x0030 #define CFG_PROFILABLE_METADATA 0x0030
/* whether the default value is undefned */ /* whether the default value is undefined */
#define CFG_DEFAULT_UNDEFINED 0x0040 #define CFG_DEFAULT_UNDEFINED 0x0040
/* whether the default value is commented out on output */ /* whether the default value is commented out on output */
#define CFG_DEFAULT_COMMENTED 0x0080 #define CFG_DEFAULT_COMMENTED 0x0080
@ -218,7 +218,7 @@ struct cft_check_handle {
unsigned skip_if_checked:1; /* skip the check if already done before - return last state */ unsigned skip_if_checked:1; /* skip the check if already done before - return last state */
unsigned suppress_messages:1; /* suppress messages during the check if config item is found invalid */ unsigned suppress_messages:1; /* suppress messages during the check if config item is found invalid */
unsigned check_diff:1; /* check if the value used differs from default one */ unsigned check_diff:1; /* check if the value used differs from default one */
unsigned ignoreadvanced:1; /* do not include advnced configs */ unsigned ignoreadvanced:1; /* do not include advanced configs */
unsigned ignoreunsupported:1; /* do not include unsupported configs */ unsigned ignoreunsupported:1; /* do not include unsupported configs */
uint16_t disallowed_flags; /* set of disallowed flags */ uint16_t disallowed_flags; /* set of disallowed flags */
uint8_t status[CFG_COUNT]; /* flags for each configuration item - the result of the check */ uint8_t status[CFG_COUNT]; /* flags for each configuration item - the result of the check */

View File

@ -47,7 +47,7 @@
* *
* flags: Configuration item flags: * flags: Configuration item flags:
* CFG_NAME_VARIABLE - configuration node name is variable * CFG_NAME_VARIABLE - configuration node name is variable
* CFG_ALLOW_EMPTY - node value can be emtpy * CFG_ALLOW_EMPTY - node value can be empty
* CFG_ADVANCED - this node belongs to advanced config set * CFG_ADVANCED - this node belongs to advanced config set
* CFG_UNSUPPORTED - this node is not officially supported and it's used primarily by developers * CFG_UNSUPPORTED - this node is not officially supported and it's used primarily by developers
* CFG_PROFILABLE - this node is customizable by a profile * CFG_PROFILABLE - this node is customizable by a profile
@ -59,7 +59,7 @@
* CFG_SECTION_NO_CHECK - do not check content of the section at all - use with care!!! * CFG_SECTION_NO_CHECK - do not check content of the section at all - use with care!!!
* CFG_DISALLOW_INTERACTIVE - disallow configuration node for use in interactive environment (e.g. cmds run in lvm shell) * CFG_DISALLOW_INTERACTIVE - disallow configuration node for use in interactive environment (e.g. cmds run in lvm shell)
* *
* type: Allowed type for the value of simple configuation setting, one of: * type: Allowed type for the value of simple configuration setting, one of:
* CFG_TYPE_BOOL * CFG_TYPE_BOOL
* CFG_TYPE_INT * CFG_TYPE_INT
* CFG_TYPE_FLOAT * CFG_TYPE_FLOAT

View File

@ -183,7 +183,7 @@
#define DEFAULT_VDO_FORMAT_OPTIONS_CONFIG "#S" "" #define DEFAULT_VDO_FORMAT_OPTIONS_CONFIG "#S" ""
/* /*
* VDO pool will reverve some sectors in the front and the back of pool device to avoid * VDO pool will reserve some sectors in the front and the back of pool device to avoid
* seeing same device twice in the system. * seeing same device twice in the system.
*/ */
#define DEFAULT_VDO_POOL_HEADER_SIZE_KB (512) #define DEFAULT_VDO_POOL_HEADER_SIZE_KB (512)

View File

@ -666,7 +666,7 @@ struct bcache {
struct block *raw_blocks; struct block *raw_blocks;
/* /*
* Lists that categorise the blocks. * Lists that categorize the blocks.
*/ */
unsigned nr_locked; unsigned nr_locked;
unsigned nr_dirty; unsigned nr_dirty;
@ -1502,7 +1502,7 @@ int bcache_set_fd(int fd)
} }
/* /*
* Should we check for unflushed or inprogress io on an fd * Should we check for unflushed or in-progress io on an fd
* prior to doing clear_fd or change_fd? (To catch mistakes; * prior to doing clear_fd or change_fd? (To catch mistakes;
* the caller should be smart enough to not do that.) * the caller should be smart enough to not do that.)
*/ */

View File

@ -148,7 +148,7 @@ void bcache_abort_di(struct bcache *cache, int di);
//---------------------------------------------------------------- //----------------------------------------------------------------
// The next four functions are utilities written in terms of the above api. // The next four functions are utilities written in terms of the above api.
// Prefetches the blocks neccessary to satisfy a byte range. // Prefetches the blocks necessary to satisfy a byte range.
void bcache_prefetch_bytes(struct bcache *cache, int di, uint64_t start, size_t len); void bcache_prefetch_bytes(struct bcache *cache, int di, uint64_t start, size_t len);
// Reads, writes and zeroes bytes. Returns false if errors occur. // Reads, writes and zeroes bytes. Returns false if errors occur.

View File

@ -836,7 +836,7 @@ static size_t _collapse_slashes(char *str)
static int _insert_dir(const char *dir) static int _insert_dir(const char *dir)
{ {
/* alphanetically! sorted list used by bsearch of /* alphabetically! sorted list used by bsearch of
* /dev subdirectories that should not contain * /dev subdirectories that should not contain
* any block device, so no reason to scan them. */ * any block device, so no reason to scan them. */
static const char _no_scan[][12] = { static const char _no_scan[][12] = {

View File

@ -36,7 +36,7 @@
/* /*
* struct dasd_information2_t * struct dasd_information2_t
* represents any data about the device, which is visible to userspace. * represents any data about the device, which is visible to userspace.
* including foramt and featueres. * including format and features.
*/ */
typedef struct dasd_information2_t { typedef struct dasd_information2_t {
unsigned int devno; /* S/390 devno */ unsigned int devno; /* S/390 devno */

View File

@ -13,7 +13,7 @@
*/ */
/************************************************************************* /*************************************************************************
* Properties saved in udev db and accesible via libudev and used by LVM * * Properties saved in udev db and accessible via libudev and used by LVM *
*************************************************************************/ *************************************************************************/
/* /*

View File

@ -234,9 +234,9 @@ static void _read_wwid_file(const char *config_wwids_file, int *entries)
/* /*
* the initial character is the id type, * the initial character is the id type,
* 1 is t10, 2 is eui, 3 is naa, 8 is scsi name. * 1 is t10, 2 is eui, 3 is naa, 8 is scsi name.
* wwids are stored in the hash table without the type charater. * wwids are stored in the hash table without the type character.
* It seems that sometimes multipath does not include * It seems that sometimes multipath does not include
* the type charater (seen with t10 scsi_debug devs). * the type character (seen with t10 scsi_debug devs).
*/ */
typestr[0] = *wwid; typestr[0] = *wwid;
if (typestr[0] == '1' || typestr[0] == '2' || typestr[0] == '3') if (typestr[0] == '1' || typestr[0] == '2' || typestr[0] == '3')

View File

@ -2036,7 +2036,7 @@ id_done:
* use_devices list will be used to update the devices file. * use_devices list will be used to update the devices file.
* *
* The dev being added can potentially overlap existing entries * The dev being added can potentially overlap existing entries
* in various ways. If one of the existing entries is truely for * in various ways. If one of the existing entries is truly for
* this device being added, then we want to update that entry. * this device being added, then we want to update that entry.
* If some other existing entries are not for the same device, but * If some other existing entries are not for the same device, but
* have some overlapping values, then we want to try to update * have some overlapping values, then we want to try to update
@ -2098,7 +2098,7 @@ id_done:
/* /*
* If one of the existing entries (du_dev, du_pvid, du_devid, du_devname) * If one of the existing entries (du_dev, du_pvid, du_devid, du_devname)
* is truely for the same device that is being added, then set update_du to * is truly for the same device that is being added, then set update_du to
* that existing entry to be updated. * that existing entry to be updated.
*/ */
@ -3925,7 +3925,7 @@ void device_ids_search(struct cmd_context *cmd, struct dm_list *new_devs,
} }
/* /*
* The use_devices entries (repesenting the devices file) are * The use_devices entries (representing the devices file) are
* updated for the new devices on which the PVs reside. The new * updated for the new devices on which the PVs reside. The new
* correct devs are set as dil->dev on search_pvids entries. * correct devs are set as dil->dev on search_pvids entries.
* *

View File

@ -331,7 +331,7 @@ check_duplicate:
* duplicate PV. * duplicate PV.
* *
* FIXME: disable autoactivation of the VG somehow? * FIXME: disable autoactivation of the VG somehow?
* The VG may or may not already be activated when a dupicate appears. * The VG may or may not already be activated when a duplicate appears.
* Perhaps write a new field in the pv online or vg online file? * Perhaps write a new field in the pv online or vg online file?
*/ */

View File

@ -32,7 +32,7 @@ static const struct {
{ {
ALLOC_CONTIGUOUS, "contiguous", 'c'}, { ALLOC_CONTIGUOUS, "contiguous", 'c'}, {
ALLOC_CLING, "cling", 'l'}, { ALLOC_CLING, "cling", 'l'}, {
ALLOC_CLING_BY_TAGS, "cling_by_tags", 't'}, { /* Only used in log mesgs */ ALLOC_CLING_BY_TAGS, "cling_by_tags", 't'}, { /* Only used in log messages */
ALLOC_NORMAL, "normal", 'n'}, { ALLOC_NORMAL, "normal", 'n'}, {
ALLOC_ANYWHERE, "anywhere", 'a'}, { ALLOC_ANYWHERE, "anywhere", 'a'}, {
ALLOC_INHERIT, "inherit", 'i'} ALLOC_INHERIT, "inherit", 'i'}
@ -383,7 +383,7 @@ static int _lvdisplay_historical_full(struct cmd_context *cmd,
log_print("--- Historical Logical volume ---"); log_print("--- Historical Logical volume ---");
if (lvm1compat) if (lvm1compat)
/* /dev/vgname/lvname doen't actually exist for historical devices */ /* /dev/vgname/lvname doesn't actually exist for historical devices */
log_print("LV Name %s%s/%s", log_print("LV Name %s%s/%s",
hlv->vg->cmd->dev_dir, hlv->vg->name, hlv->name); hlv->vg->cmd->dev_dir, hlv->vg->name, hlv->name);
else else
@ -439,7 +439,7 @@ int lvdisplay_full(struct cmd_context *cmd,
lvm1compat = find_config_tree_bool(cmd, global_lvdisplay_shows_full_device_path_CFG, NULL); lvm1compat = find_config_tree_bool(cmd, global_lvdisplay_shows_full_device_path_CFG, NULL);
if (lvm1compat) if (lvm1compat)
/* /dev/vgname/lvname doen't actually exist for internal devices */ /* /dev/vgname/lvname doesn't actually exist for internal devices */
log_print("LV Name %s%s/%s", log_print("LV Name %s%s/%s",
lv->vg->cmd->dev_dir, lv->vg->name, lv->name); lv->vg->cmd->dev_dir, lv->vg->name, lv->name);
else if (lv_is_visible(lv)) { else if (lv_is_visible(lv)) {

View File

@ -36,7 +36,7 @@
* which can trigger duplicate detection, and/or cause lvm to display * which can trigger duplicate detection, and/or cause lvm to display
* md components as PVs rather than ignoring them. * md components as PVs rather than ignoring them.
* *
* If scanning md componenents causes duplicates to be seen, then * If scanning md components causes duplicates to be seen, then
* the lvm duplicate resolution will exclude the components. * the lvm duplicate resolution will exclude the components.
* *
* The lvm md filter has three modes: * The lvm md filter has three modes:
@ -92,7 +92,7 @@ static int _passes_md_filter(struct cmd_context *cmd, struct dev_filter *f __att
dev->filtered_flags &= ~DEV_FILTERED_MD_COMPONENT; dev->filtered_flags &= ~DEV_FILTERED_MD_COMPONENT;
/* /*
* When md_component_dectection=0, don't even try to skip md * When md_component_detection=0, don't even try to skip md
* components. * components.
*/ */
if (!md_filtering()) if (!md_filtering())

View File

@ -34,7 +34,7 @@ static int _ignore_mpath_component(struct cmd_context *cmd, struct dev_filter *f
dev->filtered_flags |= DEV_FILTERED_MPATH_COMPONENT; dev->filtered_flags |= DEV_FILTERED_MPATH_COMPONENT;
/* /*
* Warn about misconfig where an mpath component is * Warn about misconfigure where an mpath component is
* in the devices file, but its mpath device is not. * in the devices file, but its mpath device is not.
*/ */
if ((dev->flags & DEV_MATCHED_USE_ID) && mpath_devno) { if ((dev->flags & DEV_MATCHED_USE_ID) && mpath_devno) {

View File

@ -245,13 +245,13 @@ static int _sectors_to_units(uint64_t sectors, char *buffer, size_t s)
return dm_snprintf(buffer, s, "# %g %s", d, _units[i]) > 0; return dm_snprintf(buffer, s, "# %g %s", d, _units[i]) > 0;
} }
/* increment indention level */ /* increment indentation level */
void out_inc_indent(struct formatter *f) void out_inc_indent(struct formatter *f)
{ {
_inc_indent(f); _inc_indent(f);
} }
/* decrement indention level */ /* decrement indentation level */
void out_dec_indent(struct formatter *f) void out_dec_indent(struct formatter *f)
{ {
_dec_indent(f); _dec_indent(f);

View File

@ -242,7 +242,7 @@ int read_flags(uint64_t *status, enum pv_vg_lv_e type, int mask, const struct dm
* All UNKNOWN flags will again cause the "UNKNOWN" segtype. * All UNKNOWN flags will again cause the "UNKNOWN" segtype.
* *
* Note: using these segtype status flags instead of actual * Note: using these segtype status flags instead of actual
* status flags ensures wanted incompatiblity. * status flags ensures wanted incompatibility.
*/ */
int read_segtype_lvflags(uint64_t *status, char *segtype_str) int read_segtype_lvflags(uint64_t *status, char *segtype_str)
{ {

View File

@ -642,7 +642,7 @@ static int _vg_write_raw(struct format_instance *fid, struct volume_group *vg,
fidtc->write_buf_size = write_buf_size; fidtc->write_buf_size = write_buf_size;
fidtc->new_metadata_size = new_size; fidtc->new_metadata_size = new_size;
/* Immediatelly reuse existing buffer for parsing metadata back. /* Immediately reuse existing buffer for parsing metadata back.
* Such VG is then used for as precommitted VG and later committed VG. * Such VG is then used for as precommitted VG and later committed VG.
* *
* 'Lazy' creation of such VG might improve performance, but we * 'Lazy' creation of such VG might improve performance, but we

View File

@ -1071,7 +1071,7 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,
mem = vg->vgmem; mem = vg->vgmem;
/* /*
* The pv hash memorises the pv section names -> pv * The pv hash memorizes the pv section names -> pv
* structures. * structures.
*/ */
if (!(pv_hash = dm_hash_create(59))) { if (!(pv_hash = dm_hash_create(59))) {
@ -1080,7 +1080,7 @@ static struct volume_group *_read_vg(struct cmd_context *cmd,
} }
/* /*
* The lv hash memorises the lv section names -> lv * The lv hash memorizes the lv section names -> lv
* structures. * structures.
*/ */
if (!(lv_hash = dm_hash_create(1023))) { if (!(lv_hash = dm_hash_create(1023))) {

View File

@ -92,7 +92,7 @@ struct mda_lists {
struct mda_context { struct mda_context {
struct device_area area; struct device_area area;
uint64_t free_sectors; uint64_t free_sectors;
struct raw_locn rlocn; /* Store inbetween write and commit */ struct raw_locn rlocn; /* Store in-between write and commit */
}; };
/* FIXME Convert this at runtime */ /* FIXME Convert this at runtime */

View File

@ -1455,7 +1455,7 @@ int get_hints(struct cmd_context *cmd, struct dm_list *hints_out, int *newhints,
/* /*
* This is not related to hints, and is probably unnecessary, * This is not related to hints, and is probably unnecessary,
* but it could possibly help. When hints become invalid it's * but it could possibly help. When hints become invalid it's
* usually becaues devs on the system have changed, and that * usually because devs on the system have changed, and that
* also means that a missing devices file entry might be found * also means that a missing devices file entry might be found
* by searching devices again. (the searched_devnames * by searching devices again. (the searched_devnames
* mechanism should eventually be replaced) * mechanism should eventually be replaced)

View File

@ -1403,9 +1403,9 @@ int label_scan(struct cmd_context *cmd)
* *
* If there was an error during scan, we could recreate bcache here * If there was an error during scan, we could recreate bcache here
* with a larger size and then restart label_scan. But, this does not * with a larger size and then restart label_scan. But, this does not
* address the problem of writing new metadata that excedes the bcache * address the problem of writing new metadata that exceeds the bcache
* size and failing, which would often be hit first, i.e. we'll fail * size and failing, which would often be hit first, i.e. we'll fail
* to write new metadata exceding the max size before we have a chance * to write new metadata exceeding the max size before we have a chance
* to read any metadata with that size, unless we find an existing vg * to read any metadata with that size, unless we find an existing vg
* that has been previously created with the larger size. * that has been previously created with the larger size.
* *

View File

@ -1563,7 +1563,7 @@ int lockd_start_wait(struct cmd_context *cmd)
* 4. dlm: * 4. dlm:
* If the lock_type from vgcreate is dlm, lvmlockd creates the * If the lock_type from vgcreate is dlm, lvmlockd creates the
* dlm global lockspace, and queues the global lock request * dlm global lockspace, and queues the global lock request
* for vgcreate. lockd_gl_create returns sucess with the gl held. * for vgcreate. lockd_gl_create returns success with the gl held.
* *
* sanlock: * sanlock:
* If the lock_type from vgcreate is sanlock, lvmlockd returns -ENOLS * If the lock_type from vgcreate is sanlock, lvmlockd returns -ENOLS
@ -1647,7 +1647,7 @@ int lockd_global_create(struct cmd_context *cmd, const char *def_mode, const cha
return 1; return 1;
/* /*
* This is the sanlock bootstrap condition for proceding * This is the sanlock bootstrap condition for proceeding
* without the global lock: a chicken/egg case for the first * without the global lock: a chicken/egg case for the first
* sanlock VG that is created. When creating the first * sanlock VG that is created. When creating the first
* sanlock VG, there is no global lock to acquire because * sanlock VG, there is no global lock to acquire because
@ -2793,7 +2793,7 @@ static int _lockd_lv_vdo(struct cmd_context *cmd, struct logical_volume *lv,
* acquired on the thin pool LV, and a thin LV does not have a lock * acquired on the thin pool LV, and a thin LV does not have a lock
* of its own. A cache pool LV does not have a lock of its own. * of its own. A cache pool LV does not have a lock of its own.
* When the cache pool LV is linked to an origin LV, the lock of * When the cache pool LV is linked to an origin LV, the lock of
* the orgin LV protects the combined origin + cache pool. * the origin LV protects the combined origin + cache pool.
*/ */
int lockd_lv(struct cmd_context *cmd, struct logical_volume *lv, int lockd_lv(struct cmd_context *cmd, struct logical_volume *lv,
@ -2950,7 +2950,7 @@ int lockd_lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
* the LV on remote nodes through dlm/corosync at the end * the LV on remote nodes through dlm/corosync at the end
* of the command. * of the command.
* *
* If lockd_lv sucessfully acquired the LV lock ex (did not * If lockd_lv successfully acquired the LV lock ex (did not
* need to make use of SH_EXISTS_OK), then we know the LV * need to make use of SH_EXISTS_OK), then we know the LV
* is active here only (or not active anywhere) and we * is active here only (or not active anywhere) and we
* don't need to do any remote refresh. * don't need to do any remote refresh.

View File

@ -313,7 +313,7 @@ no_epoch:
} }
/* /*
* Unlink the log file depeding on command's return value * Unlink the log file depending on command's return value
* *
* When envvar LVM_EXPECTED_EXIT_STATUS is set, compare * When envvar LVM_EXPECTED_EXIT_STATUS is set, compare
* resulting status with this string. * resulting status with this string.
@ -880,12 +880,12 @@ void log_set_report_object_name_and_id(const char *name, const char *id)
* For this we need to be able to clearly identify when a command is * For this we need to be able to clearly identify when a command is
* being run by dmeventd/lvmpolld/lvmdbusd. * being run by dmeventd/lvmpolld/lvmdbusd.
* *
* TODO: log/journal_commmand_names=["lvcreate","lvconvert"] * TODO: log/journal_command_names=["lvcreate","lvconvert"]
* This would restrict log/journal=["command"] to the listed command names. * This would restrict log/journal=["command"] to the listed command names.
* Also allow "!command" to exclude a command, e.g. ["!pvs"] * Also allow "!command" to exclude a command, e.g. ["!pvs"]
* *
* TODO: log/journal_daemon_command_names=["lvcreate","lvconvert"] * TODO: log/journal_daemon_command_names=["lvcreate","lvconvert"]
* This would restrict log/journal=["dameon_command"] to the listed command names. * This would restrict log/journal=["daemon_command"] to the listed command names.
* *
* TODO: log/journal_daemon_names=["dmeventd"] * TODO: log/journal_daemon_names=["dmeventd"]
* This would restrict log/journal=["daemon_command"] to commands run by * This would restrict log/journal=["daemon_command"] to commands run by

View File

@ -223,7 +223,7 @@ int update_cache_pool_params(struct cmd_context *cmd,
if (*chunk_size < min_chunk_size) { if (*chunk_size < min_chunk_size) {
/* /*
* When using more then 'standard' default, * When using more then 'standard' default,
* keep user informed he might be using things in untintended direction * keep user informed he might be using things in unintended direction
*/ */
log_print_unless_silent("Using %s chunk size instead of default %s, " log_print_unless_silent("Using %s chunk size instead of default %s, "
"so cache pool has less than " FMTu64 " chunks.", "so cache pool has less than " FMTu64 " chunks.",
@ -572,7 +572,7 @@ int lv_cache_remove(struct logical_volume *cache_lv)
goto remove; /* Already dropped */ goto remove; /* Already dropped */
} }
/* Localy active volume is needed for writeback */ /* Locally active volume is needed for writeback */
if (!lv_info(cache_lv->vg->cmd, cache_lv, 1, NULL, 0, 0)) { if (!lv_info(cache_lv->vg->cmd, cache_lv, 1, NULL, 0, 0)) {
/* Give up any remote locks */ /* Give up any remote locks */
if (!deactivate_lv_with_sub_lv(cache_lv)) if (!deactivate_lv_with_sub_lv(cache_lv))
@ -593,7 +593,7 @@ int lv_cache_remove(struct logical_volume *cache_lv)
return_0; return_0;
return 1; return 1;
default: default:
/* Otherwise localy activate volume to sync dirty blocks */ /* Otherwise locally activate volume to sync dirty blocks */
cache_lv->status |= LV_TEMPORARY; cache_lv->status |= LV_TEMPORARY;
if (!activate_lv(cache_lv->vg->cmd, cache_lv) || if (!activate_lv(cache_lv->vg->cmd, cache_lv) ||
!lv_is_active(cache_lv)) { !lv_is_active(cache_lv)) {
@ -692,7 +692,7 @@ remove:
if (!lv_remove(cache_lv)) /* Will use LV_PENDING_DELETE */ if (!lv_remove(cache_lv)) /* Will use LV_PENDING_DELETE */
return_0; return_0;
/* CachePool or CacheVol is left inactivate for further manipulation */ /* CachePool or CacheVol is left inactive for further manipulation */
return 1; return 1;
} }
@ -923,7 +923,7 @@ int cache_set_metadata_format(struct lv_segment *seg, cache_metadata_format_t fo
} }
/* See what is a 'best' available cache metadata format /* See what is a 'best' available cache metadata format
* when the specifed format is other then always existing CMFormat 1 */ * when the specified format is other then always existing CMFormat 1 */
if (format != CACHE_METADATA_FORMAT_1) { if (format != CACHE_METADATA_FORMAT_1) {
best = _get_default_cache_metadata_format(seg->lv->vg->cmd); best = _get_default_cache_metadata_format(seg->lv->vg->cmd);

View File

@ -383,7 +383,7 @@ dm_percent_t lvseg_percent_with_info_and_seg_status(const struct lv_with_info_an
* TODO: * TODO:
* Later move to segment methods, instead of using single place. * Later move to segment methods, instead of using single place.
* Also handle logic for mirror segments and it total_* summing * Also handle logic for mirror segments and it total_* summing
* Esentially rework _target_percent API for segtype. * Essentially rework _target_percent API for segtype.
*/ */
switch (s->type) { switch (s->type) {
case SEG_STATUS_INTEGRITY: case SEG_STATUS_INTEGRITY:
@ -1495,7 +1495,7 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
repstr[8] = 'm'; /* RAID has 'm'ismatches */ repstr[8] = 'm'; /* RAID has 'm'ismatches */
else if (lv_raid_sync_action(lv, &sync_action) && else if (lv_raid_sync_action(lv, &sync_action) &&
!strcmp(sync_action, "reshape")) !strcmp(sync_action, "reshape"))
repstr[8] = 's'; /* LV is re(s)haping */ repstr[8] = 's'; /* LV is re's'haping */
else if (_sublvs_remove_after_reshape(lv)) else if (_sublvs_remove_after_reshape(lv))
repstr[8] = 'R'; /* sub-LV got freed from raid set by reshaping repstr[8] = 'R'; /* sub-LV got freed from raid set by reshaping
and has to be 'R'emoved */ and has to be 'R'emoved */
@ -1708,7 +1708,7 @@ const struct logical_volume *lv_lock_holder(const struct logical_volume *lv)
if (lv_is_thin_pool(lv) || if (lv_is_thin_pool(lv) ||
lv_is_external_origin(lv)) { lv_is_external_origin(lv)) {
/* FIXME: Ensure cluster keeps thin-pool active exlusively. /* FIXME: Ensure cluster keeps thin-pool active exclusively.
* External origin can be activated on more nodes (depends on type). * External origin can be activated on more nodes (depends on type).
*/ */
if (!lv_is_active(lv)) if (!lv_is_active(lv))
@ -1722,7 +1722,7 @@ const struct logical_volume *lv_lock_holder(const struct logical_volume *lv)
return lv; return lv;
} }
/* RAID changes visibility of splitted LVs but references them still as leg/meta */ /* RAID changes visibility of split LVs but references them still as leg/meta */
if ((lv_is_raid_image(lv) || lv_is_raid_metadata(lv)) && lv_is_visible(lv)) if ((lv_is_raid_image(lv) || lv_is_raid_metadata(lv)) && lv_is_visible(lv))
return lv; return lv;
@ -1735,7 +1735,7 @@ const struct logical_volume *lv_lock_holder(const struct logical_volume *lv)
if (lv_is_thin_volume(lv) && if (lv_is_thin_volume(lv) &&
lv_is_thin_volume(sl->seg->lv) && lv_is_thin_volume(sl->seg->lv) &&
first_seg(lv)->pool_lv == sl->seg->pool_lv) first_seg(lv)->pool_lv == sl->seg->pool_lv)
continue; /* Skip thin snaphost */ continue; /* Skip thin snapshot */
if (lv_is_pending_delete(sl->seg->lv)) if (lv_is_pending_delete(sl->seg->lv))
continue; /* Skip deleted LVs */ continue; /* Skip deleted LVs */
if (lv_is_cache_pool(sl->seg->lv) && if (lv_is_cache_pool(sl->seg->lv) &&

View File

@ -1450,7 +1450,7 @@ static struct logical_volume *_get_resizable_layer_lv(struct logical_volume *lv)
/* Check if LV is component of resizable LV. /* Check if LV is component of resizable LV.
* When resize changes size of LV this also changes the size whole stack upward. * When resize changes size of LV this also changes the size whole stack upward.
* Support syntax suggar - so user can pick any LV in stack for resize. */ * Support syntax sugar - so user can pick any LV in stack for resize. */
static int _is_layered_lv(struct logical_volume *lv) static int _is_layered_lv(struct logical_volume *lv)
{ {
return (lv_is_cache_origin(lv) || return (lv_is_cache_origin(lv) ||
@ -1736,7 +1736,7 @@ int lv_refresh_suspend_resume(const struct logical_volume *lv)
/* /*
* Remove any transiently activated error * Remove any transiently activated error
* devices which arean't used any more. * devices which aren't used any more.
*/ */
if (lv_is_raid(lv) && !lv_deactivate_any_missing_subdevs(lv)) { if (lv_is_raid(lv) && !lv_deactivate_any_missing_subdevs(lv)) {
log_error("Failed to remove temporary SubLVs from %s", display_lvname(lv)); log_error("Failed to remove temporary SubLVs from %s", display_lvname(lv));
@ -3059,7 +3059,7 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
goto next_pv; goto next_pv;
/* FIXME Split into log and non-log parallel_pvs and only check the log ones if log_iteration? */ /* FIXME Split into log and non-log parallel_pvs and only check the log ones if log_iteration? */
/* (I've temporatily disabled the check.) */ /* (I've temporarily disabled the check.) */
/* Avoid PVs used by existing parallel areas */ /* Avoid PVs used by existing parallel areas */
if (!log_iteration_count && parallel_pvs && _pv_is_parallel(pvm->pv, parallel_pvs, ah->cling_tag_list_cn)) if (!log_iteration_count && parallel_pvs && _pv_is_parallel(pvm->pv, parallel_pvs, ah->cling_tag_list_cn))
goto next_pv; goto next_pv;
@ -4111,7 +4111,7 @@ int lv_add_mirror_lvs(struct logical_volume *lv,
* FIXME: Mirrored logs are built inefficiently. * FIXME: Mirrored logs are built inefficiently.
* A mirrored log currently uses the same layout that a mirror * A mirrored log currently uses the same layout that a mirror
* LV uses. The mirror layer sits on top of AREA_LVs which form the * LV uses. The mirror layer sits on top of AREA_LVs which form the
* legs, rather on AREA_PVs. This is done to allow re-use of the * legs, rather on AREA_PVs. This is done to allow reuse of the
* various mirror functions to also handle the mirrored LV that makes * various mirror functions to also handle the mirrored LV that makes
* up the log. * up the log.
* *
@ -5062,7 +5062,7 @@ static uint32_t _adjust_amount(dm_percent_t percent, int policy_threshold, int p
percent <= (policy_threshold * DM_PERCENT_1)) percent <= (policy_threshold * DM_PERCENT_1))
return 0; /* nothing to do */ return 0; /* nothing to do */
/* /*
* Evaluate the minimal amount needed to get bellow threshold. * Evaluate the minimal amount needed to get below threshold.
* Keep using DM_PERCENT_1 units for better precision. * Keep using DM_PERCENT_1 units for better precision.
* Round-up to needed percentage value * Round-up to needed percentage value
*/ */
@ -5292,7 +5292,7 @@ static int _lvresize_extents_from_percent(const struct logical_volume *lv,
lp->extents = percent_of_extents(lp->extents, pv_extent_count, lp->extents = percent_of_extents(lp->extents, pv_extent_count,
(lp->sign != SIGN_MINUS)); (lp->sign != SIGN_MINUS));
} else if (lp->percent_value) { } else if (lp->percent_value) {
/* lvresize has PVs args and no size of exents options */ /* lvresize has PVs args and no size of extents options */
old_extents = lp->percent_value; old_extents = lp->percent_value;
lp->extents = percent_of_extents(lp->percent_value, pv_extent_count, lp->extents = percent_of_extents(lp->percent_value, pv_extent_count,
(lp->sign != SIGN_MINUS)); (lp->sign != SIGN_MINUS));
@ -6034,7 +6034,7 @@ static int _lv_resize_check_used(struct logical_volume *lv)
* fs or lv if the fs resize would require mounting or unmounting. * fs or lv if the fs resize would require mounting or unmounting.
* *
* --fs resize --fsmode offline: resize the fs only while it's unmounted * --fs resize --fsmode offline: resize the fs only while it's unmounted
* unmounting the fs if needed. fail the commandn without * unmounting the fs if needed. fail the command without
* reducing the fs or lv if the fs resize would require having * reducing the fs or lv if the fs resize would require having
* the fs mounted. * the fs mounted.
* *
@ -7753,7 +7753,7 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
return_0; return_0;
/* Release unneeded blocks in thin pool */ /* Release unneeded blocks in thin pool */
/* TODO: defer when multiple LVs relased at once */ /* TODO: defer when multiple LVs released at once */
if (pool_lv && !update_thin_pool_lv(pool_lv, 1)) { if (pool_lv && !update_thin_pool_lv(pool_lv, 1)) {
if (force < DONT_PROMPT_OVERRIDE) { if (force < DONT_PROMPT_OVERRIDE) {
log_error("Failed to update thin pool %s.", display_lvname(pool_lv)); log_error("Failed to update thin pool %s.", display_lvname(pool_lv));
@ -7991,7 +7991,7 @@ static int _lv_update_and_reload(struct logical_volume *lv, int origin_only)
display_lvname(lock_lv)); display_lvname(lock_lv));
return 0; return 0;
} else if (!(r = vg_commit(vg))) } else if (!(r = vg_commit(vg)))
stack; /* !vg_commit() has implict vg_revert() */ stack; /* !vg_commit() has implicit vg_revert() */
log_very_verbose("Updating logical volume %s in kernel.", log_very_verbose("Updating logical volume %s in kernel.",
display_lvname(lock_lv)); display_lvname(lock_lv));
@ -8069,7 +8069,7 @@ static int _split_parent_area(struct lv_segment *seg, uint32_t s,
} }
/* /*
* Split the parent LV segments if the layer LV below it is splitted. * Split the parent LV segments if the layer LV below it is split.
*/ */
int split_parent_segments_for_layer(struct cmd_context *cmd, int split_parent_segments_for_layer(struct cmd_context *cmd,
struct logical_volume *layer_lv) struct logical_volume *layer_lv)
@ -8303,7 +8303,7 @@ int remove_layer_from_lv(struct logical_volume *lv,
*/ */
/* FIXME: /* FIXME:
* These are all INTERNAL_ERROR, but ATM there is * These are all INTERNAL_ERROR, but ATM there is
* some internal API problem and this code is wrongle * some internal API problem and this code is wrongly
* executed with certain mirror manipulations. * executed with certain mirror manipulations.
* So we need to fix mirror code first, then switch... * So we need to fix mirror code first, then switch...
*/ */
@ -8352,7 +8352,7 @@ int remove_layer_from_lv(struct logical_volume *lv,
return_0; return_0;
/* /*
* recuresively rename sub LVs * recursively rename sub LVs
* currently supported only for thin data layer * currently supported only for thin data layer
* FIXME: without strcmp it breaks mirrors.... * FIXME: without strcmp it breaks mirrors....
*/ */
@ -8476,7 +8476,7 @@ struct logical_volume *insert_layer_for_lv(struct cmd_context *cmd,
lv_where->profile = lv_where->vg->cmd->profile_params->global_metadata_profile; lv_where->profile = lv_where->vg->cmd->profile_params->global_metadata_profile;
/* /*
* recuresively rename sub LVs * recursively rename sub LVs
* currently supported only for thin data layer * currently supported only for thin data layer
* FIXME: without strcmp it breaks mirrors.... * FIXME: without strcmp it breaks mirrors....
*/ */
@ -9218,7 +9218,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
} }
if (seg_is_thin_volume(lp)) { if (seg_is_thin_volume(lp)) {
/* Validate volume size to to aling on chunk for small extents */ /* Validate volume size to to align on chunk for small extents */
size = first_seg(pool_lv)->chunk_size; size = first_seg(pool_lv)->chunk_size;
if (size > vg->extent_size) { if (size > vg->extent_size) {
/* Align extents on chunk boundary size */ /* Align extents on chunk boundary size */
@ -9568,7 +9568,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
goto revert_new_lv; goto revert_new_lv;
} }
} else if (lv_is_cache_pool(lv)) { } else if (lv_is_cache_pool(lv)) {
/* Cache pool cannot be actived and zeroed */ /* Cache pool cannot be activated and zeroed */
log_very_verbose("Cache pool is prepared."); log_very_verbose("Cache pool is prepared.");
} else if (lv_is_thin_volume(lv)) { } else if (lv_is_thin_volume(lv)) {
/* Optimize the case when taking a snapshot within same pool and thin origin /* Optimize the case when taking a snapshot within same pool and thin origin
@ -9893,7 +9893,7 @@ struct logical_volume *lv_create_single(struct volume_group *vg,
return NULL; return NULL;
} }
/* Convertion via lvcreate */ /* Conversion via lvcreate */
log_print_unless_silent("Logical volume %s is now cached.", log_print_unless_silent("Logical volume %s is now cached.",
display_lvname(lv)); display_lvname(lv));
return lv; return lv;

View File

@ -407,7 +407,7 @@ struct pv_segment {
#define FMT_INSTANCE_MDAS 0x00000002U #define FMT_INSTANCE_MDAS 0x00000002U
/* /*
* Include any auxiliary mdas during format_instance intialisation. * Include any auxiliary mdas during format_instance initialisation.
* Currently, this includes metadata areas as defined by * Currently, this includes metadata areas as defined by
* metadata/dirs and metadata/raws setting. * metadata/dirs and metadata/raws setting.
*/ */

View File

@ -3150,7 +3150,7 @@ int vg_commit(struct volume_group *vg)
dm_list_iterate_items(pvl, &vg->pvs) dm_list_iterate_items(pvl, &vg->pvs)
pvl->pv->status &= ~PV_MOVED_VG; pvl->pv->status &= ~PV_MOVED_VG;
/* This *is* the original now that it's commited. */ /* This *is* the original now that it's committed. */
_vg_move_cached_precommitted_to_committed(vg); _vg_move_cached_precommitted_to_committed(vg);
if (vg->needs_write_and_commit){ if (vg->needs_write_and_commit){
@ -3259,7 +3259,7 @@ static int _vg_read_orphan_pv(struct lvmcache_info *info, void *baton)
* before clearing the in-use flag. In this case, the * before clearing the in-use flag. In this case, the
* in-use flag needs to be manually cleared on the PV. * in-use flag needs to be manually cleared on the PV.
* *
* . The PV may have damanged/unrecognized VG metadata * . The PV may have damaged/unrecognized VG metadata
* that lvm could not read. * that lvm could not read.
* *
* . The PV may have no mdas, and the PVs with the metadata * . The PV may have no mdas, and the PVs with the metadata
@ -3592,7 +3592,7 @@ static void _set_pv_device(struct format_instance *fid,
} }
/* /*
* Finds the 'struct device' that correponds to each PV in the metadata, * Finds the 'struct device' that corresponds to each PV in the metadata,
* and may make some adjustments to vg fields based on the dev properties. * and may make some adjustments to vg fields based on the dev properties.
*/ */
void set_pv_devices(struct format_instance *fid, struct volume_group *vg) void set_pv_devices(struct format_instance *fid, struct volume_group *vg)
@ -4184,10 +4184,10 @@ struct metadata_area *mda_copy(struct dm_pool *mem,
} }
/* /*
* This function provides a way to answer the question on a format specific * This function provides a way to answer the question on a format specific
* basis - does the format specfic context of these two metadata areas * basis - does the format specific context of these two metadata areas
* match? * match?
* *
* A metatdata_area is defined to be independent of the underlying context. * A metadata_area is defined to be independent of the underlying context.
* This has the benefit that we can use the same abstraction to read disks * This has the benefit that we can use the same abstraction to read disks
* (see _metadata_text_raw_ops) or files (see _metadata_text_file_ops). * (see _metadata_text_raw_ops) or files (see _metadata_text_file_ops).
* However, one downside is there is no format-independent way to determine * However, one downside is there is no format-independent way to determine
@ -4954,7 +4954,7 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name, const
goto bad; goto bad;
} }
/* I belive this is unused, the name is always set. */ /* I believe this is unused, the name is always set. */
if (!vg_name && !(vg_name = lvmcache_vgname_from_vgid(cmd->mem, vgid))) { if (!vg_name && !(vg_name = lvmcache_vgname_from_vgid(cmd->mem, vgid))) {
unlock_vg(cmd, NULL, vg_name); unlock_vg(cmd, NULL, vg_name);
log_error("VG name not found for vgid %s", vgid); log_error("VG name not found for vgid %s", vgid);
@ -4964,7 +4964,7 @@ struct volume_group *vg_read(struct cmd_context *cmd, const char *vg_name, const
/* /*
* If the command is process all vgs, process_each will get a list of vgname+vgid * If the command is process all vgs, process_each will get a list of vgname+vgid
* pairs, and then call vg_read() for each vgname+vigd. In this case we know * pairs, and then call vg_read() for each vgname+vgid. In this case we know
* which VG to read even if there are duplicate names, and we don't fail. * which VG to read even if there are duplicate names, and we don't fail.
* *
* If the user has requested one VG by name, process_each passes only the vgname * If the user has requested one VG by name, process_each passes only the vgname
@ -5195,7 +5195,7 @@ bad:
* there's an error. It is here for process_each_pv() which wants to * there's an error. It is here for process_each_pv() which wants to
* eliminate the VG's devs from the list of devs it is processing, even * eliminate the VG's devs from the list of devs it is processing, even
* when it can't access the VG because of wrong system id or similar. * when it can't access the VG because of wrong system id or similar.
* This could be done by looking at lvmcache info structs intead of 'vg'. * This could be done by looking at lvmcache info structs instead of 'vg'.
* It's also used by process_each_vg/process_each_lv which want to * It's also used by process_each_vg/process_each_lv which want to
* include error_vg values (like system_id) in error messages. * include error_vg values (like system_id) in error messages.
* These values could also be found from lvmcache vginfo. * These values could also be found from lvmcache vginfo.

View File

@ -222,7 +222,7 @@ struct format_instance *alloc_fid(const struct format_type *fmt,
/* /*
* Format instance must always be set using pv_set_fid or vg_set_fid * Format instance must always be set using pv_set_fid or vg_set_fid
* (NULL value as well), never asign it directly! This is essential * (NULL value as well), never assign it directly! This is essential
* for proper reference counting for the format instance. * for proper reference counting for the format instance.
*/ */
void pv_set_fid(struct physical_volume *pv, struct format_instance *fid); void pv_set_fid(struct physical_volume *pv, struct format_instance *fid);
@ -431,7 +431,7 @@ int lv_has_constant_stripes(struct logical_volume *lv);
/* /*
* Sometimes (eg, after an lvextend), it is possible to merge two * Sometimes (eg, after an lvextend), it is possible to merge two
* adjacent segments into a single segment. This function trys * adjacent segments into a single segment. This function tries
* to merge as many segments as possible. * to merge as many segments as possible.
*/ */
int lv_merge_segments(struct logical_volume *lv); int lv_merge_segments(struct logical_volume *lv);

View File

@ -780,7 +780,7 @@ static int _split_mirror_images(struct logical_volume *lv,
* If collapse is non-zero, <removed> is guaranteed to be equal to num_removed. * If collapse is non-zero, <removed> is guaranteed to be equal to num_removed.
* *
* Return values: * Return values:
* Failure (0) means something unexpected has happend and * Failure (0) means something unexpected has happened and
* the caller should abort. * the caller should abort.
* Even if no mirror was removed (e.g. no LV matches to 'removable_pvs'), * Even if no mirror was removed (e.g. no LV matches to 'removable_pvs'),
* returns success (1). * returns success (1).

View File

@ -739,7 +739,7 @@ static struct logical_volume *_alloc_pool_metadata_spare(struct volume_group *vg
int handle_pool_metadata_spare(struct volume_group *vg, uint32_t extents, int handle_pool_metadata_spare(struct volume_group *vg, uint32_t extents,
struct dm_list *pvh, int poolmetadataspare) struct dm_list *pvh, int poolmetadataspare)
{ {
/* Max usable size of any spare volume is currently 16GiB rouned to extent size */ /* Max usable size of any spare volume is currently 16GiB rounded to extent size */
const uint64_t MAX_SIZE = (UINT64_C(2 * 16) * 1024 * 1024 + vg->extent_size - 1) / vg->extent_size; const uint64_t MAX_SIZE = (UINT64_C(2 * 16) * 1024 * 1024 + vg->extent_size - 1) / vg->extent_size;
struct logical_volume *lv = vg->pool_metadata_spare_lv; struct logical_volume *lv = vg->pool_metadata_spare_lv;
uint32_t seg_mirrors; uint32_t seg_mirrors;

View File

@ -81,7 +81,7 @@ static int _rebuild_with_emptymeta_is_supported(struct cmd_context *cmd,
* Ensure region size exceeds the minimum for @lv because * Ensure region size exceeds the minimum for @lv because
* MD's bitmap is limited to tracking 2^21 regions. * MD's bitmap is limited to tracking 2^21 regions.
* *
* Pass in @lv_size, because funcion can be called with an empty @lv. * Pass in @lv_size, because function can be called with an empty @lv.
*/ */
uint32_t raid_ensure_min_region_size(const struct logical_volume *lv, uint64_t raid_size, uint32_t region_size) uint32_t raid_ensure_min_region_size(const struct logical_volume *lv, uint64_t raid_size, uint32_t region_size)
{ {
@ -447,7 +447,7 @@ int lv_raid_in_sync(const struct logical_volume *lv)
* This function makes no on-disk changes. The residual LVs * This function makes no on-disk changes. The residual LVs
* returned in 'removal_lvs' must be freed by the caller. * returned in 'removal_lvs' must be freed by the caller.
* *
* Returns: 1 on succes, 0 on failure * Returns: 1 on success, 0 on failure
*/ */
static int _raid_remove_top_layer(struct logical_volume *lv, static int _raid_remove_top_layer(struct logical_volume *lv,
struct dm_list *removal_lvs) struct dm_list *removal_lvs)
@ -550,7 +550,7 @@ static int _reset_flags_passed_to_kernel(struct logical_volume *lv, int *flags_r
* Run optional variable args function fn_post_on_lv with fn_post_data on @lv before second metadata update * Run optional variable args function fn_post_on_lv with fn_post_data on @lv before second metadata update
* Run optional variable args function fn_pre_on_lv with fn_pre_data on @lv before first metadata update * Run optional variable args function fn_pre_on_lv with fn_pre_data on @lv before first metadata update
* *
* This minimaly involves 2 metadata commits or more, depending on * This minimally involves 2 metadata commits or more, depending on
* pre and post functions carrying out any additional ones or not. * pre and post functions carrying out any additional ones or not.
* *
* WARNING: needs to be called with at least 4 arguments to suit va_list processing! * WARNING: needs to be called with at least 4 arguments to suit va_list processing!
@ -586,7 +586,7 @@ static int _lv_update_reload_fns_reset_eliminate_lvs(struct logical_volume *lv,
va_end(ap); va_end(ap);
/* Call any fn_pre_on_lv before the first update and reload call (e.g. to rename LVs) */ /* Call any fn_pre_on_lv before the first update and reload call (e.g. to rename LVs) */
/* returns 1: ok+ask caller to update, 2: metadata commited+ask caller to resume */ /* returns 1: ok+ask caller to update, 2: metadata committed+ask caller to resume */
if (fn_pre_on_lv && !(r = fn_pre_on_lv(lv, fn_pre_data))) { if (fn_pre_on_lv && !(r = fn_pre_on_lv(lv, fn_pre_data))) {
log_error(INTERNAL_ERROR "Pre callout function failed."); log_error(INTERNAL_ERROR "Pre callout function failed.");
return 0; return 0;
@ -643,7 +643,7 @@ static int _lv_update_reload_fns_reset_eliminate_lvs(struct logical_volume *lv,
* Assisted excl_local activation of lvl listed LVs before resume * Assisted excl_local activation of lvl listed LVs before resume
* *
* FIXME: code which needs to use this function is usually unsafe * FIXME: code which needs to use this function is usually unsafe
* againt crashes as it's doing more then 1 operation per commit * against crashes as it's doing more than 1 operation per commit
* and as such is currently irreversible on error path. * and as such is currently irreversible on error path.
* *
* Function is not making backup as this is usually not the last * Function is not making backup as this is usually not the last
@ -701,7 +701,7 @@ static int _lv_update_and_reload_list(struct logical_volume *lv, int origin_only
return r; return r;
} }
/* Wipe all LVs listsed on @lv_list committing lvm metadata */ /* Wipe all LVs listed on @lv_list committing lvm metadata */
static int _clear_lvs(struct dm_list *lv_list) static int _clear_lvs(struct dm_list *lv_list)
{ {
return activate_and_wipe_lvlist(lv_list, 1); return activate_and_wipe_lvlist(lv_list, 1);
@ -1092,7 +1092,7 @@ static int _alloc_image_components(struct logical_volume *lv,
* each of the rimages is the same size - 'le_count'. However * each of the rimages is the same size - 'le_count'. However
* for RAID 4/5/6, the stripes add together (NOT including the parity * for RAID 4/5/6, the stripes add together (NOT including the parity
* devices) to equal 'le_count'. Thus, when we are allocating * devices) to equal 'le_count'. Thus, when we are allocating
* individual devies, we must specify how large the individual device * individual devices, we must specify how large the individual device
* is along with the number we want ('count'). * is along with the number we want ('count').
*/ */
if (use_existing_area_len) if (use_existing_area_len)
@ -1824,7 +1824,7 @@ static int _reshape_adjust_to_size(struct logical_volume *lv,
/* /*
* HM Helper: * HM Helper:
* *
* Reshape: add immages to existing raid lv * Reshape: add images to existing raid lv
* *
*/ */
static int _lv_raid_change_image_count(struct logical_volume *lv, int yes, uint32_t new_count, static int _lv_raid_change_image_count(struct logical_volume *lv, int yes, uint32_t new_count,
@ -2128,16 +2128,16 @@ static int _raid_reshape_keep_images(struct logical_volume *lv,
} }
/* /*
* Reshape layout alogorithm or chunksize: * Reshape layout algorithm or chunksize:
* *
* Allocate free out-of-place reshape space unless raid10_far. * Allocate free out-of-place reshape space unless raid10_far.
* *
* If other raid10, allocate it appropriatly. * If other raid10, allocate it appropriately.
* *
* Allocate it anywhere for raid4/5 to avoid remapping * Allocate it anywhere for raid4/5 to avoid remapping
* it in case it is already allocated. * it in case it is already allocated.
* *
* The dm-raid target is able to use the space whereever it * The dm-raid target is able to use the space wherever it
* is found by appropriately selecting forward or backward reshape. * is found by appropriately selecting forward or backward reshape.
*/ */
if (seg->segtype != new_segtype && if (seg->segtype != new_segtype &&
@ -2268,7 +2268,7 @@ static int _pre_raid0_remove_rmeta(struct logical_volume *lv, void *data)
if (!_vg_write_lv_suspend_vg_commit(lv, 1)) if (!_vg_write_lv_suspend_vg_commit(lv, 1))
return_0; return_0;
/* 1: ok+ask caller to update, 2: metadata commited+ask caller to resume */ /* 1: ok+ask caller to update, 2: metadata committed+ask caller to resume */
return _activate_sub_lvs_excl_local_list(lv, lv_list) ? 2 : 0; return _activate_sub_lvs_excl_local_list(lv, lv_list) ? 2 : 0;
} }
@ -2485,7 +2485,7 @@ static int _raid_reshape(struct logical_volume *lv,
* - # of stripes requested to change * - # of stripes requested to change
* (i.e. add/remove disks from a striped raid set) * (i.e. add/remove disks from a striped raid set)
* -or- * -or-
* - stripe size change requestd * - stripe size change requested
* (e.g. 32K -> 128K) * (e.g. 32K -> 128K)
* *
* Returns: * Returns:
@ -5179,7 +5179,7 @@ static int _raid45_to_raid54_wrapper(TAKEOVER_FN_ARGS)
} }
/* Necessary when convering to raid0/striped w/o redundancy. */ /* Necessary when converting to raid0/striped w/o redundancy. */
if (!_raid_in_sync(lv)) { if (!_raid_in_sync(lv)) {
log_error("Unable to convert %s while it is not in-sync.", log_error("Unable to convert %s while it is not in-sync.",
display_lvname(lv)); display_lvname(lv));
@ -5530,7 +5530,7 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
if (!_lv_raid_change_image_count(lv, 1, new_image_count, allocate_pvs, NULL, 0, 1)) { if (!_lv_raid_change_image_count(lv, 1, new_image_count, allocate_pvs, NULL, 0, 1)) {
/* /*
* Rollback to initial type raid0/striped after failure to upconvert * Rollback to initial type raid0/striped after failure to upconvert
* to raid4/5/6/10 elminating any newly allocated metadata devices * to raid4/5/6/10 eliminating any newly allocated metadata devices
* (raid4/5 -> raid6 doesn't need any explicit changes after * (raid4/5 -> raid6 doesn't need any explicit changes after
* the allocation of the additional sub LV pair failed) * the allocation of the additional sub LV pair failed)
* *
@ -5648,7 +5648,7 @@ static int _takeover_upconvert_wrapper(TAKEOVER_FN_ARGS)
/************************************************/ /************************************************/
/* /*
* Customised takeover functions * Customized takeover functions
*/ */
static int _takeover_from_linear_to_raid0(TAKEOVER_FN_ARGS) static int _takeover_from_linear_to_raid0(TAKEOVER_FN_ARGS)
{ {
@ -6034,7 +6034,7 @@ static int _takeover_from_raid10_to_raid1(TAKEOVER_FN_ARGS)
} }
/* /*
* This'd be a reshape, not a takeover. * This would be a reshape, not a takeover.
* *
static int _takeover_from_raid10_to_raid10(TAKEOVER_FN_ARGS) static int _takeover_from_raid10_to_raid10(TAKEOVER_FN_ARGS)
{ {
@ -6334,7 +6334,7 @@ static int _region_size_change_requested(struct logical_volume *lv, int yes, con
if (!region_size) if (!region_size)
return_0; return_0;
/* CLI validation provides the check but be caucious... */ /* CLI validation provides the check but be cautious... */
if (!lv_is_raid(lv) || !seg || seg_is_any_raid0(seg)) { if (!lv_is_raid(lv) || !seg || seg_is_any_raid0(seg)) {
log_error(INTERNAL_ERROR "Cannot change region size of %s.", log_error(INTERNAL_ERROR "Cannot change region size of %s.",
display_lvname(lv)); display_lvname(lv));
@ -6561,7 +6561,7 @@ int lv_raid_convert(struct logical_volume *lv,
region_size = region_size ? : (uint32_t)get_default_region_size(lv->vg->cmd); region_size = region_size ? : (uint32_t)get_default_region_size(lv->vg->cmd);
/* /*
* Check acceptible options mirrors, region_size, * Check acceptable options mirrors, region_size,
* stripes and/or stripe_size have been provided. * stripes and/or stripe_size have been provided.
*/ */
if (!_conversion_options_allowed(seg, &new_segtype, yes, if (!_conversion_options_allowed(seg, &new_segtype, yes,
@ -6745,7 +6745,7 @@ has_enough_space:
* _lv_raid_has_primary_failure_on_recover * _lv_raid_has_primary_failure_on_recover
* @lv * @lv
* *
* The kernel behaves strangely in the presense of a primary failure * The kernel behaves strangely in the presence of a primary failure
* during a "recover" sync operation. It's not technically a bug, I * during a "recover" sync operation. It's not technically a bug, I
* suppose, but the output of the status line can make it difficult * suppose, but the output of the status line can make it difficult
* to determine that we are in this state. The sync ratio will be * to determine that we are in this state. The sync ratio will be

View File

@ -42,7 +42,7 @@ struct dev_manager;
#define SEG_CACHE (1ULL << 13) #define SEG_CACHE (1ULL << 13)
#define SEG_CACHE_POOL (1ULL << 14) #define SEG_CACHE_POOL (1ULL << 14)
#define SEG_MIRROR (1ULL << 15) #define SEG_MIRROR (1ULL << 15)
#define SEG_ONLY_EXCLUSIVE (1ULL << 16) /* In cluster only exlusive activation */ #define SEG_ONLY_EXCLUSIVE (1ULL << 16) /* In cluster only exclusive activation */
#define SEG_CAN_ERROR_WHEN_FULL (1ULL << 17) #define SEG_CAN_ERROR_WHEN_FULL (1ULL << 17)
#define SEG_RAID0 (1ULL << 18) #define SEG_RAID0 (1ULL << 18)

View File

@ -330,7 +330,7 @@ int vg_remove_snapshot(struct logical_volume *cow)
clear_snapshot_merge(origin); clear_snapshot_merge(origin);
/* /*
* preload origin IFF "snapshot-merge" target is active * preload origin IFF "snapshot-merge" target is active
* - IMPORTANT: avoids preload if inactivate merge is pending * - IMPORTANT: avoids preload if inactive merge is pending
*/ */
} }

View File

@ -342,7 +342,7 @@ out:
/* /*
* Detect overprovisioning and check lvm2 is configured for auto resize. * Detect overprovisioning and check lvm2 is configured for auto resize.
* *
* If passed LV is thin volume/pool, check first only this one for overprovisiong. * If passed LV is thin volume/pool, check first only this one for overprovisioning.
* Lots of test combined together. * Lots of test combined together.
* Test is not detecting status of dmeventd, too complex for now... * Test is not detecting status of dmeventd, too complex for now...
*/ */
@ -1018,10 +1018,10 @@ int lv_is_thin_snapshot(const struct logical_volume *lv)
} }
/* /*
* Explict check of new thin pool for usability * Explicit check of new thin pool for usability
* *
* Allow use of thin pools by external apps. When lvm2 metadata has * Allow use of thin pools by external apps. When lvm2 metadata has
* transaction_id == 0 for a new thin pool, it will explicitely validate * transaction_id == 0 for a new thin pool, it will explicitly validate
* the pool is still unused. * the pool is still unused.
* *
* To prevent lvm2 to create thin volumes in externally used thin pools * To prevent lvm2 from creating thin volumes in externally used thin pools
@ -1102,7 +1102,7 @@ uint64_t estimate_thin_pool_metadata_size(uint32_t data_extents, uint32_t extent
return _estimate_metadata_size(data_extents, extent_size, chunk_size); return _estimate_metadata_size(data_extents, extent_size, chunk_size);
} }
/* Validates whtether the LV can be used as external origin */ /* Validates whether the LV can be used as external origin */
int validate_thin_external_origin(const struct logical_volume *lv, int validate_thin_external_origin(const struct logical_volume *lv,
const struct logical_volume *pool_lv) const struct logical_volume *pool_lv)
{ {

View File

@ -97,7 +97,7 @@ const char *get_vdo_write_policy_name(enum dm_vdo_write_policy policy)
/* /*
* Size of VDO virtual LV is adding header_size in front and back of device * Size of VDO virtual LV is adding header_size in front and back of device
* to avoid colission with blkid checks. * to avoid collision with blkid checks.
*/ */
static uint64_t _get_virtual_size(uint32_t extents, uint32_t extent_size, static uint64_t _get_virtual_size(uint32_t extents, uint32_t extent_size,
uint32_t header_size) uint32_t header_size)

View File

@ -432,7 +432,7 @@ static int _mirrored_target_present(struct cmd_context *cmd,
} }
/* /*
* Check only for modules if atttributes requested and no previous check. * Check only for modules if attributes requested and no previous check.
*/ */
if (attributes) if (attributes)
*attributes = _mirror_attributes; *attributes = _mirror_attributes;

View File

@ -21,7 +21,7 @@
struct cmd_context; struct cmd_context;
/** /**
* Execute command with paramaters and return status * Execute command with parameters and return status
* *
* \param rstatus * \param rstatus
* Returns command's exit status code. * Returns command's exit status code.

View File

@ -153,7 +153,7 @@ static const char *_lvname_has_reserved_prefix(const char *lvname)
static const char *_lvname_has_reserved_component_string(const char *lvname) static const char *_lvname_has_reserved_component_string(const char *lvname)
{ {
static const char _strings[][12] = { static const char _strings[][12] = {
/* Suffixes for compoment LVs */ /* Suffixes for component LVs */
"_cdata", "_cdata",
"_cmeta", "_cmeta",
"_corig", "_corig",
@ -182,7 +182,7 @@ static const char *_lvname_has_reserved_component_string(const char *lvname)
static const char *_lvname_has_reserved_string(const char *lvname) static const char *_lvname_has_reserved_string(const char *lvname)
{ {
static const char _strings[][12] = { static const char _strings[][12] = {
/* Additional suffixes for non-compoment LVs */ /* Additional suffixes for non-component LVs */
"_pmspare", "_pmspare",
"_vorigin" "_vorigin"
}; };
@ -220,7 +220,7 @@ int apply_lvname_restrictions(const char *name)
} }
/* /*
* Validates name and returns an emunerated reason for name validataion failure. * Validates name and returns an enumerated reason for name validation failure.
*/ */
name_error_t validate_name_detailed(const char *name) name_error_t validate_name_detailed(const char *name)
{ {
@ -247,7 +247,7 @@ char *build_dm_uuid(struct dm_pool *mem, const struct logical_volume *lv,
if (!layer) { if (!layer) {
/* /*
* Mark internal LVs with layer suffix * Mark internal LVs with layer suffix
* so tools like blkid may immeditelly see it's * so tools like blkid may immediately see it's
* an internal LV they should not scan. * an internal LV they should not scan.
* Should also make internal detection simpler. * Should also make internal detection simpler.
*/ */
@ -295,7 +295,7 @@ char *first_substring(const char *str, ...)
} }
/* Cut suffix (if present) and write the name into NAME_LEN sized new_name buffer /* Cut suffix (if present) and write the name into NAME_LEN sized new_name buffer
* When suffix is NULL, everythin past the last '_' is removed. * When suffix is NULL, everything past the last '_' is removed.
* Returns 1 when suffix was removed, 0 otherwise. * Returns 1 when suffix was removed, 0 otherwise.
*/ */
int drop_lvname_suffix(char *new_name, const char *name, const char *suffix) int drop_lvname_suffix(char *new_name, const char *name, const char *suffix)

View File

@ -88,7 +88,7 @@ static int _memlock_count_daemon = 0;
static int _priority; static int _priority;
static int _default_priority; static int _default_priority;
/* list of maps, that are unconditionaly ignored */ /* list of maps, that are unconditionally ignored */
static const char _ignore_maps[][16] = { static const char _ignore_maps[][16] = {
"[vdso]", "[vdso]",
"[vsyscall]", "[vsyscall]",
@ -179,7 +179,7 @@ static void _allocate_memory(void)
/* FIXME else warn user setting got ignored */ /* FIXME else warn user setting got ignored */
#ifdef HAVE_MALLINFO2 #ifdef HAVE_MALLINFO2
/* Prefer mallinfo2 call when avaialble with newer glibc */ /* Prefer mallinfo2 call when available with newer glibc */
#define MALLINFO mallinfo2 #define MALLINFO mallinfo2
#else #else
#define MALLINFO mallinfo #define MALLINFO mallinfo
@ -525,7 +525,7 @@ static void _lock_mem(struct cmd_context *cmd)
/* /*
* For daemon we need to use mlockall() * For daemon we need to use mlockall()
* so even future adition of thread which may not even use lvm lib * so even future addition of thread which may not even use lvm lib
* will not block memory locked thread * will not block memory locked thread
* Note: assuming _memlock_count_daemon is updated before _memlock_count * Note: assuming _memlock_count_daemon is updated before _memlock_count
*/ */
@ -614,7 +614,7 @@ static void _unlock_mem_if_possible(struct cmd_context *cmd)
* remains fast. * remains fast.
* *
* Memory stays locked until 'memlock_unlock()' is called so when possible * Memory stays locked until 'memlock_unlock()' is called so when possible
* it may stay locked across multiple crictical section entrances. * it may stay locked across multiple critical section entrances.
*/ */
void critical_section_inc(struct cmd_context *cmd, const char *reason) void critical_section_inc(struct cmd_context *cmd, const char *reason)
{ {

View File

@ -110,7 +110,7 @@ static int _ ## NAME ## _get (const void *obj, struct lvm_property_type *prop) \
* then prepending this argument to '_disp'. For example, if this argument * then prepending this argument to '_disp'. For example, if this argument
* is 'uuid', the display function is _uuid_disp(). Adding a new field may * is 'uuid', the display function is _uuid_disp(). Adding a new field may
* require defining a new display function (for example _myfieldname_disp()), * require defining a new display function (for example _myfieldname_disp()),
* or re-use of an existing one (for example, _uint32_disp()). * or reuse of an existing one (for example, _uint32_disp()).
* 8. Unique format identifier / field id. This name must be unique and is * 8. Unique format identifier / field id. This name must be unique and is
* used to select fields via '-o' in the reporting commands (pvs/vgs/lvs). * used to select fields via '-o' in the reporting commands (pvs/vgs/lvs).
* The string used to specify the field - the 'id' member of * The string used to specify the field - the 'id' member of

View File

@ -560,7 +560,7 @@ static int _raid_target_present(struct cmd_context *cmd,
_features[i].feature); _features[i].feature);
/* /*
* Seperate check for proper raid4 mapping supported * Separate check for proper raid4 mapping supported
* *
* If we get more of these range checks, avoid them * If we get more of these range checks, avoid them
* altogether by enhancing 'struct raid_feature' * altogether by enhancing 'struct raid_feature'

View File

@ -533,7 +533,7 @@ static int _preparse_fuzzy_time(const char *s, struct time_info *info)
* If the string consists of -:+, digits or spaces, * If the string consists of -:+, digits or spaces,
* it's not worth looking for fuzzy names here - * it's not worth looking for fuzzy names here -
* it's standard YYYY-MM-DD HH:MM:SS +-HH:MM format * it's standard YYYY-MM-DD HH:MM:SS +-HH:MM format
* and that is parseable by libdm directly. * and that is parsable by libdm directly.
*/ */
if (!(isdigit(c) || (c == '-') || (c == ':') || (c == '+'))) if (!(isdigit(c) || (c == '-') || (c == ':') || (c == '+')))
fuzzy = 1; fuzzy = 1;
@ -2562,7 +2562,7 @@ static int _segstartpe_disp(struct dm_report *rh,
return dm_report_field_uint32(rh, field, &seg->le); return dm_report_field_uint32(rh, field, &seg->le);
} }
/* Hepler: get used stripes = total stripes minux any to remove after reshape */ /* Helper: get used stripes = total stripes minus any to remove after reshape */
static int _get_seg_used_stripes(const struct lv_segment *seg) static int _get_seg_used_stripes(const struct lv_segment *seg)
{ {
uint32_t s; uint32_t s;
@ -2636,7 +2636,7 @@ static struct logical_volume *_lv_for_raid_image_seg(const struct lv_segment *se
return NULL; return NULL;
} }
/* Helper: return the top-level raid LV in case it is reshapale for @seg or @seg if it is */ /* Helper: return the top-level raid LV in case it is reshapable for @seg or @seg if it is */
static const struct lv_segment *_get_reshapable_seg(const struct lv_segment *seg, struct dm_pool *mem) static const struct lv_segment *_get_reshapable_seg(const struct lv_segment *seg, struct dm_pool *mem)
{ {
return _lv_for_raid_image_seg(seg, mem) ? seg : NULL; return _lv_for_raid_image_seg(seg, mem) ? seg : NULL;

View File

@ -30,7 +30,7 @@
* <field_name>_<reserved_value_name> * <field_name>_<reserved_value_name>
* *
* FIELD_BINARY_RESERVED_VALUE is similar to FIELD_RESERVED_VALUE but it * FIELD_BINARY_RESERVED_VALUE is similar to FIELD_RESERVED_VALUE but it
* is specifically designed for defintion of reserved names for fields * is specifically designed for definition of reserved names for fields
* with binary values where the reserved names given denote value 1. * with binary values where the reserved names given denote value 1.
* The first reserved_name given is also used for reporting, * The first reserved_name given is also used for reporting,
* others are synonyms which are recognized in addition. * others are synonyms which are recognized in addition.

View File

@ -22,7 +22,7 @@
#include "lib/activate/activate.h" #include "lib/activate/activate.h"
#include "lib/datastruct/str_list.h" #include "lib/datastruct/str_list.h"
/* Dm kernel module name for thin provisiong */ /* Dm kernel module name for thin provisioning */
static const char _thin_pool_module[] = "thin-pool"; static const char _thin_pool_module[] = "thin-pool";
static const char _thin_module[] = "thin"; static const char _thin_module[] = "thin";
@ -350,7 +350,7 @@ static int _thin_pool_add_target_line(struct dev_manager *dm,
/* /*
* Add messages only for activation tree. * Add messages only for activation tree.
* Otherwise avoid checking for existence of suspended origin. * Otherwise avoid checking for existence of suspended origin.
* Also transation_id is checked only when snapshot origin is active. * Also transaction_id is checked only when snapshot origin is active.
* (This might change later) * (This might change later)
*/ */
if (!laopts->send_messages) if (!laopts->send_messages)
@ -410,7 +410,7 @@ static int _thin_pool_target_percent(void **target_state __attribute__((unused))
if (s->fail || s->error) if (s->fail || s->error)
*percent = DM_PERCENT_INVALID; *percent = DM_PERCENT_INVALID;
/* With 'seg' report metadata percent, otherwice data percent */ /* With 'seg' report metadata percent, otherwise data percent */
else if (seg) { else if (seg) {
*percent = dm_make_percent(s->used_metadata_blocks, *percent = dm_make_percent(s->used_metadata_blocks,
s->total_metadata_blocks); s->total_metadata_blocks);

View File

@ -459,7 +459,7 @@ static int _vdo_target_present(struct cmd_context *cmd,
/* If stripe target was already detected, reuse its result */ /* If stripe target was already detected, reuse its result */
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)) || if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED)) ||
!segtype->ops->target_present || !segtype->ops->target_present(cmd, NULL, NULL)) { !segtype->ops->target_present || !segtype->ops->target_present(cmd, NULL, NULL)) {
/* Linear/Stripe targer is for mapping LVs on top of single VDO volume. */ /* Linear/Stripe target is for mapping LVs on top of single VDO volume. */
if (!target_present(cmd, TARGET_NAME_LINEAR, 0) || if (!target_present(cmd, TARGET_NAME_LINEAR, 0) ||
!target_present(cmd, TARGET_NAME_STRIPED, 0)) !target_present(cmd, TARGET_NAME_STRIPED, 0))
return 0; return 0;

View File

@ -115,7 +115,7 @@ typedef struct daemon_state {
log_state *log; log_state *log;
struct thread_state *threads; struct thread_state *threads;
/* suport for shutdown on idle */ /* support for shutdown on idle */
daemon_idle *idle; daemon_idle *idle;
void *private; /* the global daemon state */ void *private; /* the global daemon state */

View File

@ -128,7 +128,7 @@ static int daemon_close_stray_fds(const char *command, int suppress_warning,
_daemon_get_cmdline(ppid, parent_cmdline, sizeof(parent_cmdline)); _daemon_get_cmdline(ppid, parent_cmdline, sizeof(parent_cmdline));
if ((d = opendir(_fd_dir))) { if ((d = opendir(_fd_dir))) {
/* Discover openned descriptors from /proc/self/fd listing */ /* Discover opened descriptors from /proc/self/fd listing */
while ((dirent = readdir(d))) { while ((dirent = readdir(d))) {
fd = atoi(dirent->d_name); fd = atoi(dirent->d_name);
if ((fd > from_fd) && if ((fd > from_fd) &&

View File

@ -180,7 +180,7 @@ scan:
/* /*
* whitespaces between digits are not allowed, * whitespaces between digits are not allowed,
* but it's ok if whitespaces are on head or tail. * but it's ok if whitespaces are on head or tail.
* when old_c is whilespace, * when old_c is whitespace,
* if totaldigits == ndigits, whitespace is on head. * if totaldigits == ndigits, whitespace is on head.
* if whitespace is on tail, it should not run here. * if whitespace is on tail, it should not run here.
* as c was ',' or '\0', * as c was ',' or '\0',

View File

@ -262,7 +262,7 @@ int dm_hash_insert_allow_multiple(struct dm_hash_table *t, const char *key,
/* /*
* Look through multiple entries with the same key for one that has a * Look through multiple entries with the same key for one that has a
* matching val and return that. If none have maching val, return NULL. * matching val and return that. If none have matching val, return NULL.
*/ */
void *dm_hash_lookup_with_val(struct dm_hash_table *t, const char *key, void *dm_hash_lookup_with_val(struct dm_hash_table *t, const char *key,
const void *val, uint32_t val_len) const void *val, uint32_t val_len)

View File

@ -109,7 +109,7 @@ static void _dmfilemapd_log_with_errno(int level,
} }
/* /*
* Only used for reporting errors before daemonise(). * Only used for reporting errors before daemonize().
*/ */
__attribute__((format(printf, 1, 2))) __attribute__((format(printf, 1, 2)))
static void _early_log(const char *fmt, ...) static void _early_log(const char *fmt, ...)
@ -668,7 +668,7 @@ static int _daemonize(struct filemap_monitor *fm)
if (fd > STDERR_FILENO) if (fd > STDERR_FILENO)
(void) close(fd); (void) close(fd);
} }
/* TODO: Use libdaemon/server/daemon-server.c _daemonise() */ /* TODO: Use libdaemon/server/daemon-server.c _daemonize() */
for (ffd = (int) sysconf(_SC_OPEN_MAX) - 1; ffd > STDERR_FILENO; --ffd) for (ffd = (int) sysconf(_SC_OPEN_MAX) - 1; ffd > STDERR_FILENO; --ffd)
if (ffd != fm->fd) if (ffd != fm->fd)
(void) close(ffd); (void) close(ffd);
@ -772,7 +772,7 @@ static int _dmfilemapd(struct filemap_monitor *fm)
wait: wait:
_filemap_monitor_wait(FILEMAPD_WAIT_USECS); _filemap_monitor_wait(FILEMAPD_WAIT_USECS);
/* mode=inode termination condions */ /* mode=inode termination conditions */
if (fm->mode == DM_FILEMAPD_FOLLOW_INODE) { if (fm->mode == DM_FILEMAPD_FOLLOW_INODE) {
if (!_filemap_monitor_check_file_unlinked(fm)) if (!_filemap_monitor_check_file_unlinked(fm))
goto bad; goto bad;

View File

@ -108,7 +108,7 @@ struct option {
#define DM_STATS_PROGRAM_ID "dmstats" #define DM_STATS_PROGRAM_ID "dmstats"
/* /*
* Basic commands this code implments. * Basic commands this code implements.
*/ */
typedef enum { typedef enum {
DMSETUP_CMD = 0, DMSETUP_CMD = 0,
@ -787,7 +787,7 @@ static int _update_interval_times(void)
} }
/* /*
* Take cycle timstamp as close as possible to ioctl return. * Take cycle timestamp as close as possible to ioctl return.
* *
* FIXME: use per-region timestamp deltas for interval estimate. * FIXME: use per-region timestamp deltas for interval estimate.
*/ */
@ -2034,7 +2034,7 @@ static int _simple(int task, const char *name, uint32_t event_nr, int display)
if (_switches[CHECKS_ARG] && !dm_task_enable_checks(dmt)) if (_switches[CHECKS_ARG] && !dm_task_enable_checks(dmt))
goto_out; goto_out;
/* FIXME: needs to coperate with udev */ /* FIXME: needs to cooperate with udev */
if (!_set_task_add_node(dmt)) if (!_set_task_add_node(dmt))
goto_out; goto_out;
@ -2858,7 +2858,7 @@ _tsym_vt100 = {
/* /*
* Tree drawing functions. * Tree drawing functions.
*/ */
/* FIXME Get rid of these statics - use dynamic struct */ /* FIXME Get rid of these static variables - use dynamic struct */
/* FIXME Explain what these vars are for */ /* FIXME Explain what these vars are for */
static int _tree_width[MAX_DEPTH], _tree_more[MAX_DEPTH]; static int _tree_width[MAX_DEPTH], _tree_more[MAX_DEPTH];
static int _termwidth = 80; /* Maximum output width */ static int _termwidth = 80; /* Maximum output width */

View File

@ -666,7 +666,7 @@ void *dm_get_next_target(struct dm_task *dmt, void *next,
return t->next; return t->next;
} }
/* Unmarshall the target info returned from a status call */ /* Unmarshal the target info returned from a status call */
static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi) static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi)
{ {
char *outbuf = (char *) dmi + dmi->data_start; char *outbuf = (char *) dmi + dmi->data_start;

View File

@ -283,15 +283,15 @@ int dm_task_add_target(struct dm_task *dmt,
#define DM_FORMAT_DEV_BUFSIZE 13 /* Minimum bufsize to handle worst case. */ #define DM_FORMAT_DEV_BUFSIZE 13 /* Minimum bufsize to handle worst case. */
int dm_format_dev(char *buf, int bufsize, uint32_t dev_major, uint32_t dev_minor); int dm_format_dev(char *buf, int bufsize, uint32_t dev_major, uint32_t dev_minor);
/* Use this to retrive target information returned from a STATUS call */ /* Use this to retrieve target information returned from a STATUS call */
void *dm_get_next_target(struct dm_task *dmt, void *dm_get_next_target(struct dm_task *dmt,
void *next, uint64_t *start, uint64_t *length, void *next, uint64_t *start, uint64_t *length,
char **target_type, char **params); char **target_type, char **params);
/* /*
* Following dm_get_status_* functions will allocate approriate status structure * Following dm_get_status_* functions will allocate appropriate status structure
* from passed mempool together with the necessary character arrays. * from passed mempool together with the necessary character arrays.
* Destroying the mempool will release all asociated allocation. * Destroying the mempool will release all associated allocation.
*/ */
/* Parse params from STATUS call for mirror target */ /* Parse params from STATUS call for mirror target */
@ -451,7 +451,7 @@ int dm_get_status_thin(struct dm_pool *mem, const char *params,
* *
* Operations on dm_stats objects include managing statistics regions * Operations on dm_stats objects include managing statistics regions
* and obtaining and manipulating current counter values from the * and obtaining and manipulating current counter values from the
* kernel. Methods are provided to return baisc count values and to * kernel. Methods are provided to return basic count values and to
* derive time-based metrics when a suitable interval estimate is * derive time-based metrics when a suitable interval estimate is
* provided. * provided.
* *
@ -544,7 +544,7 @@ int dm_stats_bind_from_fd(struct dm_stats *dms, int fd);
int dm_message_supports_precise_timestamps(void); int dm_message_supports_precise_timestamps(void);
/* /*
* Precise timetamps and histogram support. * Precise timestamps and histogram support.
* *
* Test for the presence of precise_timestamps and histogram support. * Test for the presence of precise_timestamps and histogram support.
*/ */
@ -554,7 +554,7 @@ int dm_stats_driver_supports_histogram(void);
/* /*
* Returns 1 if the specified region has the precise_timestamps feature * Returns 1 if the specified region has the precise_timestamps feature
* enabled (i.e. produces nanosecond-precision counter values) or 0 for * enabled (i.e. produces nanosecond-precision counter values) or 0 for
* a region using the default milisecond precision. * a region using the default millisecond precision.
*/ */
int dm_stats_get_region_precise_timestamps(const struct dm_stats *dms, int dm_stats_get_region_precise_timestamps(const struct dm_stats *dms,
uint64_t region_id); uint64_t region_id);
@ -563,7 +563,7 @@ int dm_stats_get_region_precise_timestamps(const struct dm_stats *dms,
* Returns 1 if the region at the current cursor location has the * Returns 1 if the region at the current cursor location has the
* precise_timestamps feature enabled (i.e. produces * precise_timestamps feature enabled (i.e. produces
* nanosecond-precision counter values) or 0 for a region using the * nanosecond-precision counter values) or 0 for a region using the
* default milisecond precision. * default millisecond precision.
*/ */
int dm_stats_get_current_region_precise_timestamps(const struct dm_stats *dms); int dm_stats_get_current_region_precise_timestamps(const struct dm_stats *dms);
@ -741,7 +741,7 @@ void dm_stats_buffer_destroy(struct dm_stats *dms, char *buffer);
* following a dm_stats_list() or dm_stats_populate() call. * following a dm_stats_list() or dm_stats_populate() call.
* *
* The value returned is the number of registered regions visible with the * The value returned is the number of registered regions visible with the
* progam_id value used for the list or populate operation and may not be * program_id value used for the list or populate operation and may not be
* equal to the highest present region_id (either due to program_id * equal to the highest present region_id (either due to program_id
* filtering or gaps in the sequence of region_id values). * filtering or gaps in the sequence of region_id values).
* *
@ -754,7 +754,7 @@ uint64_t dm_stats_get_nr_regions(const struct dm_stats *dms);
* following a dm_stats_list() or dm_stats_populate() call. * following a dm_stats_list() or dm_stats_populate() call.
* *
* The value returned is the number of registered groups visible with the * The value returned is the number of registered groups visible with the
* progam_id value used for the list or populate operation and may not be * program_id value used for the list or populate operation and may not be
* equal to the highest present group_id (either due to program_id * equal to the highest present group_id (either due to program_id
* filtering or gaps in the sequence of group_id values). * filtering or gaps in the sequence of group_id values).
* *
@ -813,7 +813,7 @@ int dm_stats_get_region_nr_histogram_bins(const struct dm_stats *dms,
* complete. * complete.
* *
* An optional unit suffix of 's', 'ms', 'us', or 'ns' may be used to * An optional unit suffix of 's', 'ms', 'us', or 'ns' may be used to
* specify units of seconds, miliseconds, microseconds, or nanoseconds: * specify units of seconds, milliseconds, microseconds, or nanoseconds:
* *
* bounds_str="1ns,1us,1ms,1s" * bounds_str="1ns,1us,1ms,1s"
* bounds_str="500us,1ms,1500us,2ms" * bounds_str="500us,1ms,1500us,2ms"
@ -821,12 +821,12 @@ int dm_stats_get_region_nr_histogram_bins(const struct dm_stats *dms,
* *
* The smallest valid unit of time for a histogram specification depends * The smallest valid unit of time for a histogram specification depends
* on whether the region uses precise timestamps: for a region with the * on whether the region uses precise timestamps: for a region with the
* default milisecond precision the smallest possible histogram boundary * default millisecond precision the smallest possible histogram boundary
* magnitude is one milisecond: attempting to use a histogram with a * magnitude is one millisecond: attempting to use a histogram with a
* boundary less than one milisecond when creating a region will cause * boundary less than one millisecond when creating a region will cause
* the region to be created with the precise_timestamps feature enabled. * the region to be created with the precise_timestamps feature enabled.
* *
* On sucess a pointer to the struct dm_histogram representing the * On success a pointer to the struct dm_histogram representing the
* bounds values is returned, or NULL in the case of error. The returned * bounds values is returned, or NULL in the case of error. The returned
* pointer should be freed using dm_free() when no longer required. * pointer should be freed using dm_free() when no longer required.
*/ */
@ -842,9 +842,9 @@ struct dm_histogram *dm_histogram_bounds_from_string(const char *bounds_str);
* *
* The smallest valid unit of time for a histogram specification depends * The smallest valid unit of time for a histogram specification depends
* on whether the region uses precise timestamps: for a region with the * on whether the region uses precise timestamps: for a region with the
* default milisecond precision the smallest possible histogram boundary * default millisecond precision the smallest possible histogram boundary
* magnitude is one milisecond: attempting to use a histogram with a * magnitude is one millisecond: attempting to use a histogram with a
* boundary less than one milisecond when creating a region will cause * boundary less than one millisecond when creating a region will cause
* the region to be created with the precise_timestamps feature enabled. * the region to be created with the precise_timestamps feature enabled.
*/ */
struct dm_histogram *dm_histogram_bounds_from_uint64(const uint64_t *bounds); struct dm_histogram *dm_histogram_bounds_from_uint64(const uint64_t *bounds);
@ -1501,7 +1501,7 @@ const char *dm_sysfs_dir(void);
/* /*
* Configure default UUID prefix string. * Configure default UUID prefix string.
* Conventionally this is a short capitalised prefix indicating the subsystem * Conventionally this is a short capitalized prefix indicating the subsystem
* that is managing the devices, e.g. "LVM-" or "MPATH-". * that is managing the devices, e.g. "LVM-" or "MPATH-".
* To support stacks of devices from different subsystems, recursive functions * To support stacks of devices from different subsystems, recursive functions
* stop recursing if they reach a device with a different prefix. * stop recursing if they reach a device with a different prefix.
@ -1544,7 +1544,7 @@ int dm_device_has_mounted_fs(uint32_t major, uint32_t minor);
/* /*
* Callback is invoked for individal mountinfo lines, * Callback is invoked for individual mountinfo lines,
* minor, major and mount target are parsed and unmangled. * minor, major and mount target are parsed and unmangled.
*/ */
typedef int (*dm_mountinfo_line_callback_fn) (char *line, unsigned maj, unsigned min, typedef int (*dm_mountinfo_line_callback_fn) (char *line, unsigned maj, unsigned min,
@ -1658,7 +1658,7 @@ void *dm_tree_node_get_context(const struct dm_tree_node *node);
/* /*
* Returns 0 when node size and its children is unchanged. * Returns 0 when node size and its children is unchanged.
* Returns 1 when node or any of its children has increased size. * Returns 1 when node or any of its children has increased size.
* Rerurns -1 when node or any of its children has reduced size. * Returns -1 when node or any of its children has reduced size.
*/ */
int dm_tree_node_size_changed(const struct dm_tree_node *dnode); int dm_tree_node_size_changed(const struct dm_tree_node *dnode);
@ -1845,7 +1845,7 @@ struct dm_tree_node_raid_params {
}; };
/* /*
* Version 2 of above node raid params struct to keeep API compatibility. * Version 2 of above node raid params struct to keep API compatibility.
* *
* Extended for more than 64 legs (max 253 in the MD kernel runtime!), * Extended for more than 64 legs (max 253 in the MD kernel runtime!),
* delta_disks for disk add/remove reshaping, * delta_disks for disk add/remove reshaping,
@ -1868,7 +1868,7 @@ struct dm_tree_node_raid_params_v2 {
* 'rebuilds' and 'writemostly' are bitfields that signify * 'rebuilds' and 'writemostly' are bitfields that signify
* which devices in the array are to be rebuilt or marked * which devices in the array are to be rebuilt or marked
* writemostly. The kernel supports up to 253 legs. * writemostly. The kernel supports up to 253 legs.
* We limit ourselvs by choosing a lower value * We limit ourselves by choosing a lower value
* for DEFAULT_RAID_MAX_IMAGES. * for DEFAULT_RAID_MAX_IMAGES.
*/ */
uint64_t rebuilds[RAID_BITMAP_SIZE]; uint64_t rebuilds[RAID_BITMAP_SIZE];
@ -1905,7 +1905,7 @@ struct dm_config_node;
* *
* policy_settings { * policy_settings {
* migration_threshold=2048 * migration_threshold=2048
* sequention_threashold=100 * sequential_threshold=100
* ... * ...
* } * }
* *
@ -1962,7 +1962,7 @@ int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
/* End of Replicator API */ /* End of Replicator API */
/* /*
* FIXME: Defines bellow are based on kernel's dm-thin.c defines * FIXME: Defines below are based on kernel's dm-thin.c defines
* DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT) * DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
* DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT) * DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
*/ */
@ -2028,7 +2028,7 @@ int dm_tree_node_set_thin_pool_error_if_no_space(struct dm_tree_node *node,
int dm_tree_node_set_thin_pool_read_only(struct dm_tree_node *node, int dm_tree_node_set_thin_pool_read_only(struct dm_tree_node *node,
unsigned read_only); unsigned read_only);
/* /*
* FIXME: Defines bellow are based on kernel's dm-thin.c defines * FIXME: Defines below are based on kernel's dm-thin.c defines
* MAX_DEV_ID ((1 << 24) - 1) * MAX_DEV_ID ((1 << 24) - 1)
*/ */
#define DM_THIN_MAX_DEVICE_ID (UINT32_C((1 << 24) - 1)) #define DM_THIN_MAX_DEVICE_ID (UINT32_C((1 << 24) - 1))
@ -3561,7 +3561,7 @@ void dm_config_destroy(struct dm_config_tree *cft);
/* Simple output line by line. */ /* Simple output line by line. */
typedef int (*dm_putline_fn)(const char *line, void *baton); typedef int (*dm_putline_fn)(const char *line, void *baton);
/* More advaced output with config node reference. */ /* More advanced output with config node reference. */
typedef int (*dm_config_node_out_fn)(const struct dm_config_node *cn, const char *line, void *baton); typedef int (*dm_config_node_out_fn)(const struct dm_config_node *cn, const char *line, void *baton);
/* /*
@ -3624,7 +3624,7 @@ struct dm_config_node *dm_config_clone_node(struct dm_config_tree *cft, const st
* Common formatting flags applicable to all config node types (lower 16 bits). * Common formatting flags applicable to all config node types (lower 16 bits).
*/ */
#define DM_CONFIG_VALUE_FMT_COMMON_ARRAY 0x00000001 /* value is array */ #define DM_CONFIG_VALUE_FMT_COMMON_ARRAY 0x00000001 /* value is array */
#define DM_CONFIG_VALUE_FMT_COMMON_EXTRA_SPACES 0x00000002 /* add spaces in "key = value" pairs in constrast to "key=value" for better readability */ #define DM_CONFIG_VALUE_FMT_COMMON_EXTRA_SPACES 0x00000002 /* add spaces in "key = value" pairs in contrast to "key=value" for better readability */
/* /*
* Type-related config node formatting flags (higher 16 bits). * Type-related config node formatting flags (higher 16 bits).

View File

@ -1794,7 +1794,7 @@ static int _mountinfo_parse_line(const char *line, unsigned *maj, unsigned *min,
} }
/* /*
* Function to operate on individal mountinfo line, * Function to operate on individual mountinfo line,
* minor, major and mount target are parsed and unmangled * minor, major and mount target are parsed and unmangled
*/ */
int dm_mountinfo_read(dm_mountinfo_line_callback_fn read_fn, void *cb_data) int dm_mountinfo_read(dm_mountinfo_line_callback_fn read_fn, void *cb_data)

View File

@ -755,7 +755,7 @@ static int _match_aux(struct parser *p, int t)
} }
/* /*
* tokeniser * tokenizer
*/ */
static void _get_token(struct parser *p, int tok_prev) static void _get_token(struct parser *p, int tok_prev)
{ {

View File

@ -235,7 +235,7 @@ struct load_properties {
/* /*
* Preload tree normally only loads and not resume, but there is * Preload tree normally only loads and not resume, but there is
* automatic resume when target is extended, as it's believed * automatic resume when target is extended, as it's believed
* there can be no i/o flying to this 'new' extedend space * there can be no i/o flying to this 'new' extended space
* from any device above. Reason is that preloaded target above * from any device above. Reason is that preloaded target above
* may actually need to see its bigger subdevice before it * may actually need to see its bigger subdevice before it
* gets suspended. As long as devices are simple linears * gets suspended. As long as devices are simple linears
@ -248,8 +248,8 @@ struct load_properties {
/* /*
* Call node_send_messages(), set to 2 if there are messages * Call node_send_messages(), set to 2 if there are messages
* When != 0, it validates matching transaction id, thus thin-pools * When != 0, it validates matching transaction id, thus thin-pools
* where transation_id is passed as 0 are never validated, this * where transaction_id is passed as 0 are never validated, this
* allows external managment of thin-pool TID. * allows external management of thin-pool TID.
*/ */
unsigned send_messages; unsigned send_messages;
/* Skip suspending node's children, used when sending messages to thin-pool */ /* Skip suspending node's children, used when sending messages to thin-pool */
@ -2025,7 +2025,7 @@ int dm_tree_activate_children(struct dm_tree_node *dnode,
/* /*
* FIXME: Implement delayed error reporting * FIXME: Implement delayed error reporting
* activation should be stopped only in the case, * activation should be stopped only in the case,
* the submission of transation_id message fails, * the submission of transaction_id message fails,
* resume should continue further, just whole command * resume should continue further, just whole command
* has to report failure. * has to report failure.
*/ */
@ -2115,7 +2115,7 @@ static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *
return 1; return 1;
} }
/* simplify string emiting code */ /* simplify string emitting code */
#define EMIT_PARAMS(p, str...)\ #define EMIT_PARAMS(p, str...)\
do {\ do {\
int w;\ int w;\
@ -2924,7 +2924,7 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
if (!child->info.exists && !(node_created = _create_node(child, dnode))) if (!child->info.exists && !(node_created = _create_node(child, dnode)))
return_0; return_0;
/* Propagate delayed resume from exteded child node */ /* Propagate delayed resume from extended child node */
if (child->props.delay_resume_if_extended) if (child->props.delay_resume_if_extended)
dnode->props.delay_resume_if_extended = 1; dnode->props.delay_resume_if_extended = 1;
@ -3357,7 +3357,7 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
* - maximum 253 legs in a raid set (MD kernel limitation) * - maximum 253 legs in a raid set (MD kernel limitation)
* - delta_disks for disk add/remove reshaping * - delta_disks for disk add/remove reshaping
* - data_offset for out-of-place reshaping * - data_offset for out-of-place reshaping
* - data_copies to cope witth odd numbers of raid10 disks * - data_copies to cope with odd numbers of raid10 disks
*/ */
int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node, int dm_tree_node_add_raid_target_with_params_v2(struct dm_tree_node *node,
uint64_t size, uint64_t size,

View File

@ -458,7 +458,7 @@ static int _report_field_string_list(struct dm_report *rh,
* *
* The very first item in the array of 'struct pos_len' is always * The very first item in the array of 'struct pos_len' is always
* a pair denoting '[list_size,strlen(field->report_string)]'. The * a pair denoting '[list_size,strlen(field->report_string)]'. The
* rest of items denote start and lenght of each item in the list. * rest of items denote start and length of each item in the list.
* *
* *
* For example, if we have a list with "abc", "xy", "defgh" * For example, if we have a list with "abc", "xy", "defgh"
@ -1395,7 +1395,7 @@ struct dm_report *dm_report_init(uint32_t *report_types,
} }
/* /*
* Return updated types value for further compatility check by caller. * Return updated types value for further compatibility check by caller.
*/ */
_dm_report_init_update_types(rh, report_types); _dm_report_init_update_types(rh, report_types);
@ -4023,7 +4023,7 @@ error:
return NULL; return NULL;
} }
/* AND_EXPRESSION := EX (AND_OP AND_EXPRSSION) */ /* AND_EXPRESSION := EX (AND_OP AND_EXPRESSION) */
static struct selection_node *_parse_and_ex(struct dm_report *rh, static struct selection_node *_parse_and_ex(struct dm_report *rh,
const char *s, const char *s,
const char **next, const char **next,

View File

@ -3589,7 +3589,7 @@ static struct dm_histogram *_alloc_dm_histogram(int nr_bins)
* 'us', 'ms', or 's' unit suffixes. * 'us', 'ms', or 's' unit suffixes.
* *
* The scale parameter indicates the timescale used for this region: one * The scale parameter indicates the timescale used for this region: one
* for nanoscale resolution and NSEC_PER_MSEC for miliseconds. * for nanoscale resolution and NSEC_PER_MSEC for milliseconds.
* *
* On return bounds contains a pointer to an array of uint64_t * On return bounds contains a pointer to an array of uint64_t
* histogram bounds values expressed in units of nanoseconds. * histogram bounds values expressed in units of nanoseconds.

View File

@ -485,8 +485,8 @@ static dm_status_mirror_health_t _get_health(char c)
} }
/* /*
* dm core parms: 0 409600 mirror * dm core params: 0 409600 mirror
* Mirror core parms: 2 253:4 253:5 400/400 * Mirror core params: 2 253:4 253:5 400/400
* New-style failure params: 1 AA * New-style failure params: 1 AA
* New-style log params: 3 cluster 253:3 A * New-style log params: 3 cluster 253:3 A
* or 3 disk 253:3 A * or 3 disk 253:3 A

View File

@ -242,7 +242,7 @@ ifeq (,$(findstring -z$(comma)now,$(LDFLAGS)))
LDFLAGS += -Wl,-z,now LDFLAGS += -Wl,-z,now
endif endif
# TODO: think about configure option for this flag # TODO: think about configure option for this flag
# for now leave decision on distro maitainer # for now leave decision on distro maintainer
#ifeq (,$(findstring -z$(comma)pack-relative-relocs,$(LDFLAGS))) #ifeq (,$(findstring -z$(comma)pack-relative-relocs,$(LDFLAGS)))
# LDFLAGS += -Wl,-z,pack-relative-relocs # LDFLAGS += -Wl,-z,pack-relative-relocs
#endif #endif

View File

@ -62,7 +62,7 @@
* *
* The UUID contained in the dm_ulog_request structure is the reference that * The UUID contained in the dm_ulog_request structure is the reference that
* will be used by all request types to a specific log. The constructor must * will be used by all request types to a specific log. The constructor must
* record this assotiation with the instance created. * record this association with the instance created.
* *
* When the request has been processed, user-space must return the * When the request has been processed, user-space must return the
* dm_ulog_request to the kernel - setting the 'error' field, filling the * dm_ulog_request to the kernel - setting the 'error' field, filling the

View File

@ -129,7 +129,7 @@ int dm_pool_locked(struct dm_pool *p)
* Bool specifies whether to store the pool crc/hash checksum. * Bool specifies whether to store the pool crc/hash checksum.
* *
* \return * \return
* 1 (success) when the pool was preperly locked, 0 otherwise. * 1 (success) when the pool was properly locked, 0 otherwise.
*/ */
int dm_pool_lock(struct dm_pool *p, int crc) int dm_pool_lock(struct dm_pool *p, int crc)
{ {

View File

@ -40,7 +40,7 @@ struct rx_node {
dm_bitset_t charset; dm_bitset_t charset;
struct rx_node *left, *right; struct rx_node *left, *right;
/* used to build the dfa for the toker */ /* used to build the dfa for the tokenizer */
unsigned charset_index; unsigned charset_index;
int nullable, final; int nullable, final;
dm_bitset_t firstpos; dm_bitset_t firstpos;

View File

@ -279,7 +279,7 @@ ifeq (,$(findstring -z$(comma)now,$(LDFLAGS)))
LDFLAGS += -Wl,-z,now LDFLAGS += -Wl,-z,now
endif endif
# TODO: think about configure option for this flag # TODO: think about configure option for this flag
# for now leave decision on distro maitainer # for now leave decision on distro maintainer
#ifeq (,$(findstring -z$(comma)pack-relative-relocs,$(LDFLAGS))) #ifeq (,$(findstring -z$(comma)pack-relative-relocs,$(LDFLAGS)))
# LDFLAGS += -Wl,-z,pack-relative-relocs # LDFLAGS += -Wl,-z,pack-relative-relocs
#endif #endif

View File

@ -66,7 +66,7 @@ install_libexec: lvresize_fs_helper.sh
install: install_lvm2 install_ocf install_device-mapper install: install_lvm2 install_ocf install_device-mapper
# FIXME Customise for other distributions # FIXME Customize for other distributions
install_initscripts: install_initscripts:
$(SHOW) " [INSTALL] initscripts" $(SHOW) " [INSTALL] initscripts"
$(Q) $(INSTALL_DIR) $(initdir) $(Q) $(INSTALL_DIR) $(initdir)

Some files were not shown because too many files have changed in this diff Show More