Mirror of git://sourceware.org/git/lvm2.git (synced 2024-12-21 13:34:40 +03:00)
devices: simpler dm devs cache naming
The dm devs cache is separate from the ordinary dev cache, so give the function names distinct prefixes, using "dm_devs_cache" to prefix dm devs cache functions.
Parent: 8b4701d7c4
Commit: ad1d688734
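
The renames, collected from the diff below:

    dev_cache_use_dm_devs_cache()         -> dm_devs_cache_use()
    dev_cache_update_dm_devs()            -> dm_devs_cache_update()
    dev_cache_destroy_dm_devs()           -> dm_devs_cache_destroy()
    dev_cache_dm_devs_label_invalidate()  -> dm_devs_cache_label_invalidate()
    dev_cache_get_dm_dev_by_devno()       -> dm_devs_cache_get_by_devno()
    dev_cache_get_dm_dev_by_uuid()        -> dm_devs_cache_get_by_uuid()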
@@ -766,9 +766,9 @@ int dm_device_is_usable(struct cmd_context *cmd, struct device *dev, struct dev_
 	int only_error_or_zero_target = 1;
 	int r = 0;
 
-	if (dev_cache_use_dm_devs_cache() &&
+	if (dm_devs_cache_use() &&
 	    /* With cache we can avoid status calls for unusable UUIDs */
-	    (dm_dev = dev_cache_get_dm_dev_by_devno(cmd, dev->dev)) &&
+	    (dm_dev = dm_devs_cache_get_by_devno(cmd, dev->dev)) &&
 	    !_is_usable_uuid(dev, dm_dev->name, dm_dev->uuid, check.check_reserved, check.check_lv, is_lv))
 		return 0;
 
@@ -897,8 +897,8 @@ int devno_dm_uuid(struct cmd_context *cmd, int major, int minor,
 	const char *uuid;
 	int r = 0;
 
-	if (dev_cache_use_dm_devs_cache()) {
-		if ((dm_dev = dev_cache_get_dm_dev_by_devno(cmd, MKDEV(major, minor)))) {
+	if (dm_devs_cache_use()) {
+		if ((dm_dev = dm_devs_cache_get_by_devno(cmd, MKDEV(major, minor)))) {
 			dm_strncpy(uuid_buf, dm_dev->uuid, uuid_buf_size);
 			return 1;
 		}
@@ -1085,9 +1085,9 @@ int dev_manager_info(struct cmd_context *cmd,
 
 		dm_strncpy(old_style_dlid, dlid, sizeof(old_style_dlid));
 
-		if (dev_cache_use_dm_devs_cache() &&
-		    !dev_cache_get_dm_dev_by_uuid(cmd, dlid) &&
-		    !dev_cache_get_dm_dev_by_uuid(cmd, old_style_dlid)) {
+		if (dm_devs_cache_use() &&
+		    !dm_devs_cache_get_by_uuid(cmd, dlid) &&
+		    !dm_devs_cache_get_by_uuid(cmd, old_style_dlid)) {
 			log_debug("Cached as inactive %s.", name);
 			if (dminfo)
 				memset(dminfo, 0, sizeof(*dminfo));
@@ -2459,8 +2459,8 @@ static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
 	if (!(dlid = build_dm_uuid(dm->track_pending_delete ? dm->cmd->pending_delete_mem : dm->mem, lv, layer)))
 		return_0;
 
-	if (dev_cache_use_dm_devs_cache()) {
-		if (!(dm_dev = dev_cache_get_dm_dev_by_uuid(dm->cmd, dlid))) {
+	if (dm_devs_cache_use()) {
+		if (!(dm_dev = dm_devs_cache_get_by_uuid(dm->cmd, dlid))) {
 			log_debug("Cached as not present %s.", name);
 			return 1;
 		}
@@ -2614,7 +2614,7 @@ static int _pool_callback(struct dm_tree_node *node,
 		}
 	}
 
-	dev_cache_destroy_dm_devs();
+	dm_devs_cache_destroy();
 
 	log_debug("Running check command on %s", mpath);
 
@@ -3998,7 +3998,7 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
 
 	/* Drop any cache before DM table manipulation within locked section
 	 * TODO: check if it makes sense to manage cache within lock */
-	dev_cache_destroy_dm_devs();
+	dm_devs_cache_destroy();
 
 	dtree = _create_partial_dtree(dm, lv, laopts->origin_only);
 
@@ -1328,12 +1328,12 @@ out:
 	return r;
 }
 
-int dev_cache_use_dm_devs_cache(void)
+int dm_devs_cache_use(void)
 {
 	return _cache.use_dm_devs_cache;
 }
 
-void dev_cache_destroy_dm_devs(void)
+void dm_devs_cache_destroy(void)
 {
 	_cache.use_dm_devs_cache = 0;
 
@@ -1350,13 +1350,13 @@ void dev_cache_destroy_dm_devs(void)
 	dm_device_list_destroy(&_cache.dm_devs);
 }
 
-int dev_cache_update_dm_devs(void)
+int dm_devs_cache_update(void)
 {
 	struct dm_active_device *dm_dev;
 	unsigned devs_features;
 	uint32_t d;
 
-	dev_cache_destroy_dm_devs();
+	dm_devs_cache_destroy();
 
 	if (!get_dm_active_devices(NULL, &_cache.dm_devs, &devs_features))
 		return 1;
@@ -1396,7 +1396,7 @@ int dev_cache_update_dm_devs(void)
 	return 1;
 }
 
-void dev_cache_dm_devs_label_invalidate(struct cmd_context *cmd)
+void dm_devs_cache_label_invalidate(struct cmd_context *cmd)
 {
 	struct dm_active_device *dm_dev;
 	struct device *dev;
@@ -1412,7 +1412,7 @@ void dev_cache_dm_devs_label_invalidate(struct cmd_context *cmd)
 
 /* Find active DM device in devs array for given major:minor */
 const struct dm_active_device *
-dev_cache_get_dm_dev_by_devno(struct cmd_context *cmd, dev_t devno)
+dm_devs_cache_get_by_devno(struct cmd_context *cmd, dev_t devno)
 {
 	uint32_t d = _shuffle_devno(devno);
 
@@ -1424,7 +1424,7 @@ dev_cache_get_dm_dev_by_devno(struct cmd_context *cmd, dev_t devno)
 
 /* Find active DM device in devs array for given DM UUID */
 const struct dm_active_device *
-dev_cache_get_dm_dev_by_uuid(struct cmd_context *cmd, const char *dm_uuid)
+dm_devs_cache_get_by_uuid(struct cmd_context *cmd, const char *dm_uuid)
 {
 	if (!_cache.dm_uuids)
 		return NULL;
@@ -1538,7 +1538,7 @@ int dev_cache_exit(void)
 			 vt.num_open);
 	}
 
-	dev_cache_destroy_dm_devs();
+	dm_devs_cache_destroy();
 
 	if (_cache.mem)
 		dm_pool_destroy(_cache.mem);
@@ -37,14 +37,24 @@ struct dev_filter {
 struct dm_list *dev_cache_get_dev_list_for_vgid(const char *vgid);
 struct dm_list *dev_cache_get_dev_list_for_lvid(const char *lvid);
 
-int dev_cache_use_dm_devs_cache(void);
-int dev_cache_update_dm_devs(void);
-void dev_cache_destroy_dm_devs(void);
-void dev_cache_dm_devs_label_invalidate(struct cmd_context *cmd);
+/*
+ * The cache of dm devices is enabled when the kernel
+ * supports the ability to quickly report on many dm
+ * devs together, in which case we can get all the dm
+ * info at once and store it in this dm_devs_cache.
+ * This avoids many individual dm dev ioctl calls.
+ * The callers of these dm_devs_cache functions must
+ * have an alternative for when dm_devs_cache_use()
+ * returns 0.
+ */
+int dm_devs_cache_use(void);
+int dm_devs_cache_update(void);
+void dm_devs_cache_destroy(void);
+void dm_devs_cache_label_invalidate(struct cmd_context *cmd);
 const struct dm_active_device *
-dev_cache_get_dm_dev_by_devno(struct cmd_context *cmd, dev_t devno);
+dm_devs_cache_get_by_devno(struct cmd_context *cmd, dev_t devno);
 const struct dm_active_device *
-dev_cache_get_dm_dev_by_uuid(struct cmd_context *cmd, const char *dm_uuid);
+dm_devs_cache_get_by_uuid(struct cmd_context *cmd, const char *dm_uuid);
 
 /*
  * The global device cache.
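
The header comment added in this hunk spells out the caller contract: the dm devs cache is only populated when the kernel can report all dm devices in one call, so every caller needs a fallback for dm_devs_cache_use() returning 0. Below is a minimal sketch of that pattern, modeled on devno_dm_uuid() and label_scan_invalidate_lvs() in this diff; the include paths and the helpers _lookup_dm_uuid() and _query_devno_directly() are assumptions for illustration, not part of the commit.

/*
 * Sketch only: shows the fast-path/fallback shape the new comment asks for.
 */
#include <sys/types.h>			/* dev_t, size_t */
#include <libdevmapper.h>		/* dm_strncpy() (assumed header for the sketch) */
#include "lib/device/dev-cache.h"	/* dm_devs_cache_use(), dm_devs_cache_get_by_devno() */

/* Hypothetical fallback: real code would issue an individual dm
 * ioctl/status query for this one device. */
static int _query_devno_directly(struct cmd_context *cmd, dev_t devno,
				 char *uuid_buf, size_t uuid_buf_size)
{
	(void) cmd; (void) devno; (void) uuid_buf; (void) uuid_buf_size;
	return 0;
}

static int _lookup_dm_uuid(struct cmd_context *cmd, dev_t devno,
			   char *uuid_buf, size_t uuid_buf_size)
{
	const struct dm_active_device *dm_dev;

	if (dm_devs_cache_use()) {
		/* Fast path: dm_devs_cache_update() already fetched all
		 * active dm devs from the kernel in one call. */
		if ((dm_dev = dm_devs_cache_get_by_devno(cmd, devno))) {
			dm_strncpy(uuid_buf, dm_dev->uuid, uuid_buf_size);
			return 1;
		}
		return 0;	/* not an active dm device */
	}

	/* Slow path: the kernel cannot report all dm devs at once,
	 * so query this device individually. */
	return _query_devno_directly(cmd, devno, uuid_buf, uuid_buf_size);
}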
@@ -1263,7 +1263,7 @@ int label_scan(struct cmd_context *cmd)
 	 * here, before processing the hints file, so that the dm uuid checks
 	 * in hint processing can benefit from the dm uuid cache.)
 	 */
-	if (!dev_cache_update_dm_devs())
+	if (!dm_devs_cache_update())
 		return_0;
 
 	/*
@@ -1670,8 +1670,8 @@ void label_scan_invalidate_lvs(struct cmd_context *cmd, struct dm_list *lvs)
 
 	log_debug("Invalidating devs for any PVs on LVs.");
 
-	if (dev_cache_use_dm_devs_cache())
-		dev_cache_dm_devs_label_invalidate(cmd);
+	if (dm_devs_cache_use())
+		dm_devs_cache_label_invalidate(cmd);
 	else {
 		dm_list_iterate_items(lvl, lvs)
 			label_scan_invalidate_lv(cmd, lvl->lv);
@@ -330,7 +330,7 @@ int vg_write_lock_held(void)
 
 int sync_local_dev_names(struct cmd_context* cmd)
 {
-	dev_cache_destroy_dm_devs();
+	dm_devs_cache_destroy();
 	memlock_unlock(cmd);
 	fs_unlock();
 	return 1;