
Use early udev synchronisation and update of dev nodes for clustered mirrors.

When using clustered mirrors, we need device nodes to be created while the
device tree is being processed, not at the end as we normally do (cmirror needs
to access the nodes prematurely). Therefore we use a new flag called
"immediate_dev_node", stored in deptree's load_properties struct, to instruct
the device tree processing code to synchronise with udev immediately and flush
all stacked node operations so the nodes are ready for use.

For now, immediate_dev_node is used only for clustered mirrors and only while
dm_tree_preload_children is processed. We can add more uses later if needed.
Peter Rajnoha 2010-06-21 08:54:32 +00:00
parent 2995925278
commit 566515c095
2 changed files with 28 additions and 0 deletions
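
For illustration, here is a minimal sketch (not part of the patch) of what the
deferred flush amounts to once a level of the tree has been processed. The
helper name _flush_nodes_early is hypothetical; dm_udev_wait(),
dm_tree_get_cookie(), dm_tree_set_cookie() and dm_task_update_nodes() are the
real libdevmapper calls the patch relies on:

#include <libdevmapper.h>

/* Hypothetical helper sketching the early synchronisation step: wait for
 * udev to finish processing the events tied to this tree's cookie, drop
 * the cookie, then flush any stacked dev node operations. */
static int _flush_nodes_early(struct dm_tree_node *dnode)
{
	int r = 1;

	/* Block until udev has handled all events for the tree's cookie. */
	if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
		r = 0;

	/* The cookie is consumed by the wait above; clear it. */
	dm_tree_set_cookie(dnode, 0);

	/* Create/update dev nodes for all stacked node operations now. */
	dm_task_update_nodes();

	return r;
}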

WHATS_NEW

@@ -1,5 +1,6 @@
 Version 2.02.68 -
 ===============================
+Use early udev synchronisation and update of dev nodes for clustered mirrors.
 Unneeded kdev_t.h reference causing harm for cmirrord on some archs.
 Add man pages for lvmconf and unsupported lvmsadc and lvmsar tools.
 Fix exit code when requesting help using documented -o help option.

libdm/libdm-deptree.c

@@ -143,6 +143,13 @@ struct load_properties {
 	struct dm_list segs;

 	const char *new_name;
+
+	/* If immediate_dev_node is set to 1, try to create the dev node
+	 * as soon as possible (e.g. in preload stage even during traversal
+	 * and processing of dm tree). This will also flush all stacked dev
+	 * node operations, synchronizing with udev.
+	 */
+	int immediate_dev_node;
 };

 /* Two of these used to join two nodes with uses and used_by. */
@@ -1843,6 +1850,7 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
 	void *handle = NULL;
 	struct dm_tree_node *child;
 	struct dm_info newinfo;
+	int update_devs_flag = 0;

 	/* Preload children first */
 	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
@@ -1897,10 +1905,26 @@ int dm_tree_preload_children(struct dm_tree_node *dnode,
 		/* Update cached info */
 		child->info = newinfo;
+
+		/*
+		 * Prepare for immediate synchronization with udev and flush all stacked
+		 * dev node operations if requested by immediate_dev_node property. But
+		 * finish processing current level in the tree first.
+		 */
+		if (child->props.immediate_dev_node)
+			update_devs_flag = 1;
+
 	}

 	handle = NULL;

+	if (update_devs_flag) {
+		if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
+			stack;
+		dm_tree_set_cookie(dnode, 0);
+		dm_task_update_nodes();
+	}
+
 	return r;
 }
@@ -2157,6 +2181,9 @@ int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
 			return 0;
 		}

+		if (clustered)
+			log_node->props.immediate_dev_node = 1;
+
 		if (!_link_tree_nodes(node, log_node))
 			return_0;
 	}
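
As a usage note, a caller never sets immediate_dev_node directly; it is implied
by requesting a clustered mirror log. The sketch below uses a hypothetical
wrapper name and placeholder region size, area count and flags; only the
dm_tree_node_add_mirror_target_log() call itself comes from libdevmapper:

#include <stdint.h>
#include <libdevmapper.h>

/* Hypothetical wrapper: attach a clustered mirror log to an existing mirror
 * node. Passing clustered = 1 is what makes the patched code set
 * log_node->props.immediate_dev_node, so the log's dev node is created and
 * synchronised with udev already during dm_tree_preload_children(). */
static int add_clustered_mirror_log(struct dm_tree_node *mirror_node,
				    const char *log_uuid)
{
	if (!dm_tree_node_add_mirror_target_log(mirror_node,
						1024,	/* region_size (placeholder) */
						1,	/* clustered */
						log_uuid,
						1,	/* area_count (placeholder) */
						0))	/* flags */
		return 0;

	return 1;
}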