/*
 * Copyright (C) 2005-2017 Red Hat, Inc. All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libdm/misc/dmlib.h"
#include "libdm-targets.h"
#include "libdm-common.h"
#include "libdm/misc/kdev_t.h"
#include "libdm/misc/dm-ioctl.h"

#include <stdarg.h>
#include <sys/param.h>
#include <sys/utsname.h>

#define MAX_TARGET_PARAMSIZE 500000

/* Supported segment types */
enum {
	SEG_CACHE,
	SEG_CRYPT,
	SEG_ERROR,
	SEG_LINEAR,
	SEG_MIRRORED,
	SEG_SNAPSHOT,
	SEG_SNAPSHOT_ORIGIN,
	SEG_SNAPSHOT_MERGE,
	SEG_STRIPED,
	SEG_ZERO,
	SEG_THIN_POOL,
	SEG_THIN,
	SEG_RAID0,
	SEG_RAID0_META,
	SEG_RAID1,
	SEG_RAID10,
	SEG_RAID4,
	SEG_RAID5_N,
	SEG_RAID5_LA,
	SEG_RAID5_RA,
	SEG_RAID5_LS,
	SEG_RAID5_RS,
	SEG_RAID6_N_6,
	SEG_RAID6_ZR,
	SEG_RAID6_NR,
	SEG_RAID6_NC,
	SEG_RAID6_LS_6,
	SEG_RAID6_RS_6,
	SEG_RAID6_LA_6,
	SEG_RAID6_RA_6,
};

/* FIXME Add crypt and multipath support */
static const struct {
	unsigned type;
	const char target[16];
} _dm_segtypes[] = {
	{ SEG_CACHE, "cache" },
	{ SEG_CRYPT, "crypt" },
	{ SEG_ERROR, "error" },
	{ SEG_LINEAR, "linear" },
	{ SEG_MIRRORED, "mirror" },
	{ SEG_SNAPSHOT, "snapshot" },
	{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
	{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
	{ SEG_STRIPED, "striped" },
	{ SEG_ZERO, "zero" },
	{ SEG_THIN_POOL, "thin-pool" },
	{ SEG_THIN, "thin" },
	{ SEG_RAID0, "raid0" },
	{ SEG_RAID0_META, "raid0_meta" },
	{ SEG_RAID1, "raid1" },
	{ SEG_RAID10, "raid10" },
	{ SEG_RAID4, "raid4" },
	{ SEG_RAID5_N, "raid5_n" },
	{ SEG_RAID5_LA, "raid5_la" },
	{ SEG_RAID5_RA, "raid5_ra" },
	{ SEG_RAID5_LS, "raid5_ls" },
	{ SEG_RAID5_RS, "raid5_rs" },
	{ SEG_RAID6_N_6, "raid6_n_6" },
	{ SEG_RAID6_ZR, "raid6_zr" },
	{ SEG_RAID6_NR, "raid6_nr" },
	{ SEG_RAID6_NC, "raid6_nc" },
	{ SEG_RAID6_LS_6, "raid6_ls_6" },
	{ SEG_RAID6_RS_6, "raid6_rs_6" },
	{ SEG_RAID6_LA_6, "raid6_la_6" },
	{ SEG_RAID6_RA_6, "raid6_ra_6" },

	/*
	 * WARNING: Since the 'raid' target overloads this 1:1 mapping table
	 * for search, do not add new enum elements past them!
	 */
	{ SEG_RAID5_LS, "raid5" }, /* same as "raid5_ls" (default for MD also) */
	{ SEG_RAID6_ZR, "raid6" }, /* same as "raid6_zr" */
	{ SEG_RAID10, "raid10_near" }, /* same as "raid10" */
};
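
/*
 * Illustrative sketch (not part of the original file): because the
 * trailing "raid5"/"raid6"/"raid10_near" aliases map onto enum values
 * that already appear earlier, a forward scan of this table resolves a
 * name to its type, while a scan by type always reports the canonical
 * name first.  A hypothetical helper could look like:
 *
 *	static const char *_target_name(unsigned type)
 *	{
 *		unsigned i;
 *
 *		for (i = 0; i < sizeof(_dm_segtypes) / sizeof(_dm_segtypes[0]); ++i)
 *			if (_dm_segtypes[i].type == type)
 *				return _dm_segtypes[i].target;
 *
 *		return NULL;
 *	}
 */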

/* Some segment types have a list of areas of other devices attached */
struct seg_area {
	struct dm_list list;

	struct dm_tree_node *dev_node;

	uint64_t offset;
};

struct dm_thin_message {
	dm_thin_message_t type;
	union {
		struct {
			uint32_t device_id;
			uint32_t origin_id;
		} m_create_snap;
		struct {
			uint32_t device_id;
		} m_create_thin;
		struct {
			uint32_t device_id;
		} m_delete;
		struct {
			uint64_t current_id;
			uint64_t new_id;
		} m_set_transaction_id;
	} u;
};
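
/*
 * Illustrative sketch (not part of the original file): a thin-pool
 * message is built by setting the type and filling the matching union
 * member; the device ids below are made up for the example:
 *
 *	struct dm_thin_message msg = {
 *		.type = DM_THIN_MESSAGE_CREATE_SNAP,
 *		.u.m_create_snap = { .device_id = 1, .origin_id = 0 },
 *	};
 */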

struct thin_message {
	struct dm_list list;
	struct dm_thin_message message;
	int expected_errno;
};

/* Per-segment properties */
struct load_segment {
	struct dm_list list;

	unsigned type;
	uint64_t size;

	unsigned area_count;		/* Linear + Striped + Mirrored + Crypt */
	struct dm_list areas;		/* Linear + Striped + Mirrored + Crypt */

	uint32_t stripe_size;		/* Striped + raid */

	int persistent;			/* Snapshot */
	uint32_t chunk_size;		/* Snapshot */
	struct dm_tree_node *cow;	/* Snapshot */
	struct dm_tree_node *origin;	/* Snapshot + Snapshot origin + Cache */
	struct dm_tree_node *merge;	/* Snapshot */

	struct dm_tree_node *log;	/* Mirror */
	uint32_t region_size;		/* Mirror + raid */
	unsigned clustered;		/* Mirror */
	unsigned mirror_area_count;	/* Mirror */
	uint32_t flags;			/* Mirror + raid + Cache */
	char *uuid;			/* Clustered mirror log */

	const char *policy_name;	/* Cache */
	unsigned policy_argc;		/* Cache */
	struct dm_config_node *policy_settings;	/* Cache */

	const char *cipher;		/* Crypt */
	const char *chainmode;		/* Crypt */
	const char *iv;			/* Crypt */
	uint64_t iv_offset;		/* Crypt */
	const char *key;		/* Crypt */

	int delta_disks;		/* raid reshape number of disks */
	int data_offset;		/* raid reshape data offset on disk to set */
	uint64_t rebuilds[RAID_BITMAP_SIZE];	/* raid */
	uint64_t writemostly[RAID_BITMAP_SIZE];	/* raid */
	uint32_t writebehind;		/* raid */
	uint32_t max_recovery_rate;	/* raid kB/sec/disk */
	uint32_t min_recovery_rate;	/* raid kB/sec/disk */
	uint32_t data_copies;		/* raid10 data_copies */

	struct dm_tree_node *metadata;	/* Thin_pool + Cache */
	struct dm_tree_node *pool;	/* Thin_pool, Thin */
	struct dm_tree_node *external;	/* Thin */
	struct dm_list thin_messages;	/* Thin_pool */
	uint64_t transaction_id;	/* Thin_pool */
	uint64_t low_water_mark;	/* Thin_pool */
	uint32_t data_block_size;	/* Thin_pool + cache */
	uint32_t migration_threshold;	/* Cache */
	unsigned skip_block_zeroing;	/* Thin_pool */
	unsigned ignore_discard;	/* Thin_pool target vsn 1.1 */
	unsigned no_discard_passdown;	/* Thin_pool target vsn 1.1 */
	unsigned error_if_no_space;	/* Thin pool target vsn 1.10 */
	unsigned read_only;		/* Thin pool target vsn 1.3 */
	uint32_t device_id;		/* Thin */
};
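
/*
 * Illustrative sketch (not part of the original file): only a few of
 * the fields above apply to any one target type, e.g. a minimal linear
 * segment needs just its type, size and (empty) area list:
 *
 *	struct load_segment seg = { .type = SEG_LINEAR, .size = size };
 *
 *	dm_list_init(&seg.areas);
 *
 * Real segments are allocated from the tree's memory pool rather than
 * on the stack.
 */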

/* Per-device properties */
struct load_properties {
	int read_only;
	uint32_t major;
	uint32_t minor;

	uint32_t read_ahead;
	uint32_t read_ahead_flags;

	unsigned segment_count;
	int size_changed;
	struct dm_list segs;

	const char *new_name;

	/*
	 * If immediate_dev_node is set to 1, try to create the dev node
	 * as soon as possible (e.g. in preload stage even during traversal
	 * and processing of dm tree). This will also flush all stacked dev
	 * node operations, synchronizing with udev.
	 */
	unsigned immediate_dev_node;

	/*
	 * If the device size changed from zero and this is set,
	 * don't resume the device immediately, even if the device
	 * has parents. This works provided the parents do not
	 * validate the device size and is required by pvmove to
	 * avoid starting the mirror resync operation too early.
	 */
	unsigned delay_resume_if_new;

	/*
	 * Preload tree normally only loads and does not resume, but there is
	 * an automatic resume when a target is extended, as it's believed
	 * there can be no i/o in flight to this 'new' extended space
	 * from any device above. The reason is that a preloaded target above
	 * may actually need to see its bigger subdevice before it
	 * gets suspended. As long as devices are simple linears
	 * there is no problem resuming a bigger device in preload (before commit).
	 * However complex targets like thin-pool (raid, cache...)
	 * shall not be resumed before their commit.
	 */
	unsigned delay_resume_if_extended;

	/*
	 * Call node_send_messages(), set to 2 if there are messages.
	 * When != 0, it validates matching transaction id, thus thin-pools
	 * where transaction_id is passed as 0 are never validated; this
	 * allows external management of thin-pool TID.
	 */
	unsigned send_messages;

	/* Skip suspending node's children, used when sending messages to thin-pool */
	int skip_suspend;
};

/* Two of these used to join two nodes with uses and used_by. */
struct dm_tree_link {
	struct dm_list list;
	struct dm_tree_node *node;
};

struct dm_tree_node {
	struct dm_tree *dtree;

	const char *name;
	const char *uuid;
	struct dm_info info;

	struct dm_list uses;		/* Nodes this node uses */
	struct dm_list used_by;		/* Nodes that use this node */

	int activation_priority;	/* 0 gets activated first */
	int implicit_deps;		/* 1 device only implicitly referenced */

	uint16_t udev_flags;		/* Udev control flags */

	void *context;			/* External supplied context */

	struct load_properties props;	/* For creation/table (re)load */

	/*
	 * If presuspend of child node is needed
	 * Note: only direct child is allowed
	 */
	struct dm_tree_node *presuspend_node;

	/* Callback */
	dm_node_callback_fn callback;
	void *callback_data;

	int activated;			/* tracks activation during preload */
};

struct dm_tree {
	struct dm_pool *mem;
	struct dm_hash_table *devs;
	struct dm_hash_table *uuids;
	struct dm_tree_node root;

	int skip_lockfs;		/* 1 skips lockfs (for non-snapshots) */
	int no_flush;			/* 1 sets noflush (mirrors/multipath) */
	int retry_remove;		/* 1 retries remove if not successful */
	uint32_t cookie;
	char buf[DM_NAME_LEN + 32];	/* print buffer for device_name (major:minor) */
	const char **optional_uuid_suffixes;	/* uuid suffixes ignored when matching */
};

/*
 * Tree functions.
 */

struct dm_tree *dm_tree_create(void)
{
	struct dm_pool *dmem;
	struct dm_tree *dtree;

	if (!(dmem = dm_pool_create("dtree", 1024)) ||
	    !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
		log_error("Failed to allocate dtree.");
		if (dmem)
			dm_pool_destroy(dmem);
		return NULL;
	}

	dtree->root.dtree = dtree;
	dm_list_init(&dtree->root.uses);
	dm_list_init(&dtree->root.used_by);
	dtree->skip_lockfs = 0;
	dtree->no_flush = 0;
	dtree->mem = dmem;
	dtree->optional_uuid_suffixes = NULL;

	if (!(dtree->devs = dm_hash_create(8))) {
		log_error("dtree hash creation failed");
		dm_pool_destroy(dtree->mem);
		return NULL;
	}

	if (!(dtree->uuids = dm_hash_create(32))) {
		log_error("dtree uuid hash creation failed");
		dm_hash_destroy(dtree->devs);
		dm_pool_destroy(dtree->mem);
		return NULL;
	}

	return dtree;
}

void dm_tree_free(struct dm_tree *dtree)
{
	if (!dtree)
		return;

	dm_hash_destroy(dtree->uuids);
	dm_hash_destroy(dtree->devs);
	dm_pool_destroy(dtree->mem);
}
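
/*
 * Illustrative sketch (not part of the original file): callers pair
 * dm_tree_create() with dm_tree_free() and treat a NULL return as an
 * allocation failure:
 *
 *	struct dm_tree *dtree;
 *
 *	if (!(dtree = dm_tree_create()))
 *		return 0;
 *
 *	... build the tree, preload, activate ...
 *
 *	dm_tree_free(dtree);
 */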

void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
{
	node->dtree->cookie = cookie;
}

uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
{
	return node->dtree->cookie;
}

void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
{
	dnode->dtree->skip_lockfs = 1;
}

void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
{
	dnode->dtree->no_flush = 1;
}

void dm_tree_retry_remove(struct dm_tree_node *dnode)
{
	dnode->dtree->retry_remove = 1;
}

/*
 * Node functions.
 */

static int _nodes_are_linked(const struct dm_tree_node *parent,
			     const struct dm_tree_node *child)
{
	struct dm_tree_link *dlink;

	dm_list_iterate_items(dlink, &parent->uses)
		if (dlink->node == child)
			return 1;

	return 0;
}

static int _link(struct dm_list *list, struct dm_tree_node *node)
{
	struct dm_tree_link *dlink;

	if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
		log_error("dtree link allocation failed");
		return 0;
	}

	dlink->node = node;
	dm_list_add(list, &dlink->list);

	return 1;
}

static int _link_nodes(struct dm_tree_node *parent,
		       struct dm_tree_node *child)
{
	if (_nodes_are_linked(parent, child))
		return 1;

	if (!_link(&parent->uses, child))
		return 0;

	if (!_link(&child->used_by, parent))
		return 0;

	return 1;
}
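
/*
 * Illustrative sketch (not part of the original file): _link_nodes()
 * records the same edge on both sides, so it can be walked in either
 * direction:
 *
 *	parent->uses   : dm_tree_link { .node = child }
 *	child->used_by : dm_tree_link { .node = parent }
 */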

static void _unlink(struct dm_list *list, struct dm_tree_node *node)
{
	struct dm_tree_link *dlink;

	dm_list_iterate_items(dlink, list)
		if (dlink->node == node) {
			dm_list_del(&dlink->list);
			break;
		}
}

static void _unlink_nodes(struct dm_tree_node *parent,
			  struct dm_tree_node *child)
{
	if (!_nodes_are_linked(parent, child))
		return;

	_unlink(&parent->uses, child);
	_unlink(&child->used_by, parent);
}

static int _add_to_toplevel(struct dm_tree_node *node)
{
	return _link_nodes(&node->dtree->root, node);
}

static void _remove_from_toplevel(struct dm_tree_node *node)
{
	_unlink_nodes(&node->dtree->root, node);
}

static int _add_to_bottomlevel(struct dm_tree_node *node)
{
	return _link_nodes(node, &node->dtree->root);
}

static void _remove_from_bottomlevel(struct dm_tree_node *node)
{
	_unlink_nodes(node, &node->dtree->root);
}

static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
{
	/* Don't link to root node if child already has a parent */
	if (parent == &parent->dtree->root) {
		if (dm_tree_node_num_children(child, 1))
			return 1;
	} else
		_remove_from_toplevel(child);

	if (child == &child->dtree->root) {
		if (dm_tree_node_num_children(parent, 0))
			return 1;
	} else
		_remove_from_bottomlevel(parent);

	return _link_nodes(parent, child);
}

static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
						 const char *name,
						 const char *uuid,
						 struct dm_info *info,
						 void *context,
						 uint16_t udev_flags)
{
	struct dm_tree_node *node;
	dev_t dev;

	if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node))) ||
	    !(node->name = dm_pool_strdup(dtree->mem, name)) ||
	    !(node->uuid = dm_pool_strdup(dtree->mem, uuid))) {
		log_error("_create_dm_tree_node alloc failed.");
		return NULL;
	}

	node->dtree = dtree;
	node->info = *info;
	node->context = context;
	node->udev_flags = udev_flags;

	dm_list_init(&node->uses);
	dm_list_init(&node->used_by);
	dm_list_init(&node->props.segs);

	dev = MKDEV(info->major, info->minor);

	if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
				   sizeof(dev), node)) {
		log_error("dtree node hash insertion failed");
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	if (*uuid && !dm_hash_insert(dtree->uuids, uuid, node)) {
		log_error("dtree uuid hash insertion failed");
		dm_hash_remove_binary(dtree->devs, (const char *) &dev,
				      sizeof(dev));
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	return node;
}

static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
					       uint32_t major, uint32_t minor)
{
	dev_t dev = MKDEV(major, minor);

	return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
				     sizeof(dev));
}

void dm_tree_set_optional_uuid_suffixes(struct dm_tree *dtree, const char **optional_uuid_suffixes)
{
	dtree->optional_uuid_suffixes = optional_uuid_suffixes;
}

static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
						       const char *uuid)
{
	struct dm_tree_node *node;
	const char *default_uuid_prefix;
	size_t default_uuid_prefix_len;
	const char *suffix, *suffix_position;
	char uuid_without_suffix[DM_UUID_LEN];
	unsigned i = 0;
	const char **suffix_list = dtree->optional_uuid_suffixes;

	if ((node = dm_hash_lookup(dtree->uuids, uuid))) {
		log_debug("Matched uuid %s in deptree.", uuid);
		return node;
	}

	default_uuid_prefix = dm_uuid_prefix();
	default_uuid_prefix_len = strlen(default_uuid_prefix);

	if (suffix_list && (suffix_position = rindex(uuid, '-'))) {
		while ((suffix = suffix_list[i++])) {
			if (strcmp(suffix_position + 1, suffix))
				continue;

			(void) strncpy(uuid_without_suffix, uuid, sizeof(uuid_without_suffix));
			uuid_without_suffix[suffix_position - uuid] = '\0';

			if ((node = dm_hash_lookup(dtree->uuids, uuid_without_suffix))) {
				log_debug("Matched uuid %s (missing suffix -%s) in deptree.", uuid_without_suffix, suffix);
				return node;
			}

			break;
		}
	}

	if (strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
		return NULL;

	if ((node = dm_hash_lookup(dtree->uuids, uuid + default_uuid_prefix_len))) {
		log_debug("Matched uuid %s (missing prefix) in deptree.", uuid + default_uuid_prefix_len);
		return node;
	}

	log_debug("Not matched uuid %s in deptree.", uuid);

	return NULL;
}
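
/*
 * Illustrative sketch (not part of the original file): with the default
 * "LVM-" prefix and an optional suffix list of { "pool", "cow", NULL }
 * (example values only), a lookup for "LVM-<uuid>-pool" tries, in order:
 *
 *	"LVM-<uuid>-pool"	exact match
 *	"LVM-<uuid>"		optional suffix stripped
 *	"<uuid>-pool"		default prefix stripped
 *
 * and returns the first node found.
 */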

/* Return node's device_name (major:minor) for debug messages */
static const char *_node_name(struct dm_tree_node *dnode)
{
	if (dm_snprintf(dnode->dtree->buf, sizeof(dnode->dtree->buf),
			"%s (" FMTu32 ":" FMTu32 ")",
			dnode->name ? dnode->name : "",
			dnode->info.major, dnode->info.minor) < 0) {
		stack;
		return dnode->name;
	}

	return dnode->dtree->buf;
}

void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)
{
	if (udev_flags != dnode->udev_flags)
		log_debug_activation("Resetting %s udev_flags from 0x%x to 0x%x.",
				     _node_name(dnode),
				     dnode->udev_flags, udev_flags);

	dnode->udev_flags = udev_flags;
}

void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
				 uint32_t read_ahead,
				 uint32_t read_ahead_flags)
{
	dnode->props.read_ahead = read_ahead;
	dnode->props.read_ahead_flags = read_ahead_flags;
}

void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
				      struct dm_tree_node *presuspend_node)
{
	node->presuspend_node = presuspend_node;
}

const char *dm_tree_node_get_name(const struct dm_tree_node *node)
{
	return node->info.exists ? node->name : "";
}

const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
{
	return node->info.exists ? node->uuid : "";
}

const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
{
	return &node->info;
}

void *dm_tree_node_get_context(const struct dm_tree_node *node)
{
	return node->context;
}

int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
{
	return dnode->props.size_changed;
}

int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)