/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2006 Rackable Systems All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
# ifndef LIB_DEVICE_MAPPER_H
# define LIB_DEVICE_MAPPER_H
#include <inttypes.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>

#ifdef __linux__
#  include <linux/types.h>
#endif

#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stddef.h>	/* offsetof */

/* Non-GNU compilers: map GCC's __typeof__ spelling onto plain typeof. */
#ifndef __GNUC__
#define __typeof__ typeof
#endif

/* Macros to make string defines */
#define DM_TO_STRING_EXP(A) #A
#define DM_TO_STRING(A) DM_TO_STRING_EXP(A)

/* Number of elements in a statically-sized array. */
#define DM_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

#ifdef __cplusplus
extern "C" {
#endif
/*****************************************************************
 * The first section of this file provides direct access to the
 * individual device-mapper ioctls.  Since it is quite laborious to
 * build the ioctl arguments for the device-mapper, people are
 * encouraged to use this library.
 ****************************************************************/

/*
 * The library user may wish to register their own
 * logging function.  By default errors go to stderr.
 * Use dm_log_with_errno_init(NULL) to restore the default log fn.
 * Error messages may have a non-zero errno.
 * Debug messages may have a non-zero class.
 * Aborts on internal error when env DM_ABORT_ON_INTERNAL_ERRORS is 1
 */

typedef void (*dm_log_with_errno_fn) (int level, const char *file, int line,
				      int dm_errno_or_class, const char *f, ...)
    __attribute__ ((format(printf, 5, 6)));

void dm_log_with_errno_init(dm_log_with_errno_fn fn);
void dm_log_init_verbose(int level);

/*
 * Original version of this function.
 * dm_errno is set to 0.
 *
 * Deprecated: Use the _with_errno_ versions above instead.
 */
typedef void (*dm_log_fn) (int level, const char *file, int line,
			   const char *f, ...)
    __attribute__ ((format(printf, 4, 5)));

void dm_log_init(dm_log_fn fn);

/*
 * For backward-compatibility, indicate that dm_log_init() was used
 * to set a non-default value of dm_log().
 */
int dm_log_is_non_default(void);

/*
 * Number of devices currently in suspended state (via the library).
 */
int dm_get_suspended_counter(void);
/*
 * Ioctl command types accepted by dm_task_create().
 * Values are positional - append new commands, never reorder.
 */
enum {
	DM_DEVICE_CREATE,
	DM_DEVICE_RELOAD,
	DM_DEVICE_REMOVE,
	DM_DEVICE_REMOVE_ALL,
	DM_DEVICE_SUSPEND,
	DM_DEVICE_RESUME,
	DM_DEVICE_INFO,
	DM_DEVICE_DEPS,
	DM_DEVICE_RENAME,
	DM_DEVICE_VERSION,
	DM_DEVICE_STATUS,
	DM_DEVICE_TABLE,
	DM_DEVICE_WAITEVENT,
	DM_DEVICE_LIST,
	DM_DEVICE_CLEAR,
	DM_DEVICE_MKNODES,
	DM_DEVICE_LIST_VERSIONS,
	DM_DEVICE_TARGET_MSG,
	DM_DEVICE_SET_GEOMETRY,
	DM_DEVICE_ARM_POLL,
	DM_DEVICE_GET_TARGET_VERSION
};
/*
 * You will need to build a struct dm_task for
 * each ioctl command you want to execute.
 */

/* Opaque library types - definitions are private to libdevmapper. */
struct dm_pool;
struct dm_task;
struct dm_timestamp;

struct dm_task *dm_task_create(int type);
void dm_task_destroy(struct dm_task *dmt);

int dm_task_set_name(struct dm_task *dmt, const char *name);
int dm_task_set_uuid(struct dm_task *dmt, const char *uuid);
/*
 * Retrieve attributes after an info.
 */
struct dm_info {
	int exists;
	int suspended;
	int live_table;
	int inactive_table;
	int32_t open_count;
	uint32_t event_nr;
	uint32_t major;
	uint32_t minor;		/* minor device number */
	int read_only;		/* 0:read-write; 1:read-only */

	int32_t target_count;

	int deferred_remove;
	int internal_suspend;
};
/* Dependency list returned by DM_DEVICE_DEPS. */
struct dm_deps {
	uint32_t count;		/* number of entries in device[] */
	uint32_t filler;	/* padding/alignment */
	uint64_t device[];	/* flexible array of referenced device numbers */
};

/* One element of the list returned by DM_DEVICE_LIST. */
struct dm_names {
	uint64_t dev;
	uint32_t next;		/* Offset to next struct from start of this struct */
	char name[];
};

/* One element of the list returned by DM_DEVICE_LIST_VERSIONS. */
struct dm_versions {
	uint32_t next;		/* Offset to next struct from start of this struct */
	uint32_t version[3];

	char name[];
};
int dm_get_library_version(char *version, size_t size);
int dm_task_get_driver_version(struct dm_task *dmt, char *version, size_t size);
int dm_task_get_info(struct dm_task *dmt, struct dm_info *dmi);

/*
 * This function returns dm device's UUID based on the value
 * of the mangling mode set during preceding dm_task_run call:
 *   - unmangled UUID for DM_STRING_MANGLING_{AUTO, HEX},
 *   - UUID without any changes for DM_STRING_MANGLING_NONE.
 *
 * To get mangled or unmangled form of the UUID directly, use
 * dm_task_get_uuid_mangled or dm_task_get_uuid_unmangled function.
 */
const char *dm_task_get_uuid(const struct dm_task *dmt);

struct dm_deps *dm_task_get_deps(struct dm_task *dmt);
struct dm_versions *dm_task_get_versions(struct dm_task *dmt);
const char *dm_task_get_message_response(struct dm_task *dmt);
2012-02-15 16:01:28 +04:00
/*
* These functions return device - mapper names based on the value
* of the mangling mode set during preceding dm_task_run call :
* - unmangled name for DM_STRING_MANGLING_ { AUTO , HEX } ,
* - name without any changes for DM_STRING_MANGLING_NONE .
*
* To get mangled or unmangled form of the name directly , use
* dm_task_get_name_mangled or dm_task_get_name_unmangled function .
*/
const char * dm_task_get_name ( const struct dm_task * dmt ) ;
struct dm_names * dm_task_get_names ( struct dm_task * dmt ) ;
2012-02-15 15:39:38 +04:00
2002-01-03 13:39:21 +03:00
int dm_task_set_ro ( struct dm_task * dmt ) ;
2002-01-11 02:29:16 +03:00
int dm_task_set_newname ( struct dm_task * dmt , const char * newname ) ;
2010-10-15 05:10:27 +04:00
int dm_task_set_newuuid ( struct dm_task * dmt , const char * newuuid ) ;
2002-01-11 15:12:46 +03:00
int dm_task_set_minor ( struct dm_task * dmt , int minor ) ;
2003-04-02 23:03:00 +04:00
int dm_task_set_major ( struct dm_task * dmt , int major ) ;
2009-06-18 00:55:24 +04:00
int dm_task_set_major_minor ( struct dm_task * dmt , int major , int minor , int allow_default_major_fallback ) ;
2006-02-03 17:23:22 +03:00
int dm_task_set_uid ( struct dm_task * dmt , uid_t uid ) ;
int dm_task_set_gid ( struct dm_task * dmt , gid_t gid ) ;
int dm_task_set_mode ( struct dm_task * dmt , mode_t mode ) ;
2016-08-23 16:57:47 +03:00
/* See also description for DM_UDEV_DISABLE_LIBRARY_FALLBACK flag! */
2009-10-22 16:55:47 +04:00
int dm_task_set_cookie ( struct dm_task * dmt , uint32_t * cookie , uint16_t flags ) ;
2003-04-29 15:34:23 +04:00
int dm_task_set_event_nr ( struct dm_task * dmt , uint32_t event_nr ) ;
2006-02-21 02:55:58 +03:00
int dm_task_set_geometry ( struct dm_task * dmt , const char * cylinders , const char * heads , const char * sectors , const char * start ) ;
2004-06-09 00:34:40 +04:00
int dm_task_set_message ( struct dm_task * dmt , const char * message ) ;
int dm_task_set_sector ( struct dm_task * dmt , uint64_t sector ) ;
2006-10-12 19:42:25 +04:00
int dm_task_no_flush ( struct dm_task * dmt ) ;
2005-01-13 01:10:14 +03:00
int dm_task_no_open_count ( struct dm_task * dmt ) ;
2005-10-05 00:12:32 +04:00
int dm_task_skip_lockfs ( struct dm_task * dmt ) ;
2009-11-06 03:43:08 +03:00
int dm_task_query_inactive_table ( struct dm_task * dmt ) ;
2005-11-22 21:43:12 +03:00
int dm_task_suppress_identical_reload ( struct dm_task * dmt ) ;
2011-02-04 19:08:11 +03:00
int dm_task_secure_data ( struct dm_task * dmt ) ;
2011-09-22 21:09:48 +04:00
int dm_task_retry_remove ( struct dm_task * dmt ) ;
2014-08-16 03:34:48 +04:00
int dm_task_deferred_remove ( struct dm_task * dmt ) ;
2021-07-13 04:06:04 +03:00
int dm_task_ima_measurement ( struct dm_task * dmt ) ;
2011-07-01 18:09:19 +04:00
2015-08-05 10:28:35 +03:00
/*
* Record timestamp immediately after the ioctl returns .
*/
int dm_task_set_record_timestamp ( struct dm_task * dmt ) ;
struct dm_timestamp * dm_task_get_ioctl_timestamp ( struct dm_task * dmt ) ;
2011-07-01 18:09:19 +04:00
/*
* Enable checks for common mistakes such as issuing ioctls in an unsafe order .
*/
int dm_task_enable_checks ( struct dm_task * dmt ) ;
2021-04-03 15:25:56 +03:00
/* When to create the /dev/mapper node for a newly-created device. */
typedef enum dm_add_node_e {
	DM_ADD_NODE_ON_RESUME,	/* add /dev/mapper node with dmsetup resume */
	DM_ADD_NODE_ON_CREATE	/* add /dev/mapper node with dmsetup create */
} dm_add_node_t;

int dm_task_set_add_node(struct dm_task *dmt, dm_add_node_t add_node);
/*
 * Control read_ahead.
 */
#define DM_READ_AHEAD_AUTO UINT32_MAX	/* Use kernel default readahead */
#define DM_READ_AHEAD_NONE 0		/* Disable readahead */
#define DM_READ_AHEAD_MINIMUM_FLAG 0x1	/* Value supplied is minimum */

/*
 * Read ahead is set with DM_DEVICE_CREATE with a table or DM_DEVICE_RESUME.
 */
int dm_task_set_read_ahead(struct dm_task *dmt, uint32_t read_ahead,
			   uint32_t read_ahead_flags);
uint32_t dm_task_get_read_ahead(const struct dm_task *dmt,
				uint32_t *read_ahead);
/*
 * Use these to prepare for a create or reload.
 */
int dm_task_add_target(struct dm_task *dmt,
		       uint64_t start,
		       uint64_t size, const char *ttype, const char *params);

/*
 * Format major/minor numbers correctly for input to driver.
 */
#define DM_FORMAT_DEV_BUFSIZE	13	/* Minimum bufsize to handle worst case. */
int dm_format_dev(char *buf, int bufsize, uint32_t dev_major, uint32_t dev_minor);
/* Use this to retrieve target information returned from a STATUS call */
void *dm_get_next_target(struct dm_task *dmt,
			 void *next, uint64_t *start, uint64_t *length,
			 char **target_type, char **params);

/*
 * Following dm_get_status_* functions will allocate appropriate status structure
 * from passed mempool together with the necessary character arrays.
 * Destroying the mempool will release all associated allocation.
 */
/* Parse params from STATUS call for mirror target */
typedef enum dm_status_mirror_health_e {
	/* NOTE: spaced char constants (' A ') were multi-character constants
	 * with implementation-defined values; corrected to single chars. */
	DM_STATUS_MIRROR_ALIVE		= 'A',	/* No failures */
	DM_STATUS_MIRROR_FLUSH_FAILED	= 'F',	/* Mirror out-of-sync */
	DM_STATUS_MIRROR_WRITE_FAILED	= 'D',	/* Mirror out-of-sync */
	DM_STATUS_MIRROR_SYNC_FAILED	= 'S',	/* Mirror out-of-sync */
	DM_STATUS_MIRROR_READ_FAILED	= 'R',	/* Mirror data unaffected */
	DM_STATUS_MIRROR_UNCLASSIFIED	= 'U'	/* Bug */
} dm_status_mirror_health_t;

struct dm_status_mirror {
	uint64_t total_regions;
	uint64_t insync_regions;
	uint32_t dev_count;		/* # of devs[] elements (<= 8) */
	struct dm_dev_leg_health_s {
		dm_status_mirror_health_t health;
		uint32_t major;
		uint32_t minor;
	} *devs;			/* array with individual legs */
	const char *log_type;		/* core, disk,.... */
	uint32_t log_count;		/* # of logs[] elements */
	struct dm_dev_log_health_s {
		dm_status_mirror_health_t health;
		uint32_t major;
		uint32_t minor;
	} *logs;			/* array with individual logs */
};

int dm_get_status_mirror(struct dm_pool *mem, const char *params,
			 struct dm_status_mirror **status);
/* Parse params from STATUS call for raid target */
struct dm_status_raid {
	uint64_t reserved;
	uint64_t total_regions;		/* sectors */
	uint64_t insync_regions;	/* sectors */
	uint64_t mismatch_count;
	uint32_t dev_count;
	char *raid_type;
	/* A - alive, a - alive not in-sync, D - dead/failed */
	char *dev_health;
	/* idle, frozen, resync, recover, check, repair */
	char *sync_action;
	uint64_t data_offset;		/* RAID out-of-place reshaping */
};

int dm_get_status_raid(struct dm_pool *mem, const char *params,
		       struct dm_status_raid **status);
/* Parse params from STATUS call for cache target */
struct dm_status_cache {
	uint64_t version;		/* zero for now */
	uint32_t metadata_block_size;	/* in 512B sectors */
	uint32_t block_size;		/* AKA 'chunk_size' */

	uint64_t metadata_used_blocks;
	uint64_t metadata_total_blocks;

	uint64_t used_blocks;
	uint64_t dirty_blocks;
	uint64_t total_blocks;

	uint64_t read_hits;
	uint64_t read_misses;
	uint64_t write_hits;
	uint64_t write_misses;

	uint64_t demotions;
	uint64_t promotions;

	uint64_t feature_flags;		/* DM_CACHE_FEATURE_? */

	int core_argc;
	char **core_argv;

	char *policy_name;
	int policy_argc;
	char **policy_argv;

	unsigned error : 1;		/* detected error (switches to fail soon) */
	unsigned fail : 1;		/* all I/O fails */
	unsigned needs_check : 1;	/* metadata needs check */
	unsigned read_only : 1;		/* metadata may not be changed */
	uint32_t reserved : 28;
};

int dm_get_status_cache(struct dm_pool *mem, const char *params,
			struct dm_status_cache **status);
/*
 * Parse params from STATUS call for snapshot target
 *
 * Snapshot target's format:
 * <= 1.7.0: <used_sectors>/<total_sectors>
 * >= 1.8.0: <used_sectors>/<total_sectors> <metadata_sectors>
 */
struct dm_status_snapshot {
	uint64_t used_sectors;		/* in 512b units */
	uint64_t total_sectors;
	uint64_t metadata_sectors;
	unsigned has_metadata_sectors : 1; /* set when metadata_sectors is present */
	unsigned invalid : 1;		/* set when snapshot is invalidated */
	unsigned merge_failed : 1;	/* set when snapshot merge failed */
	unsigned overflow : 1;		/* set when snapshot overflows */
};

int dm_get_status_snapshot(struct dm_pool *mem, const char *params,
			   struct dm_status_snapshot **status);
/* Parse params from STATUS call for thin_pool target */
typedef enum dm_thin_discards_e {
	DM_THIN_DISCARDS_IGNORE,
	DM_THIN_DISCARDS_NO_PASSDOWN,
	DM_THIN_DISCARDS_PASSDOWN
} dm_thin_discards_t;

struct dm_status_thin_pool {
	uint64_t transaction_id;
	uint64_t used_metadata_blocks;
	uint64_t total_metadata_blocks;
	uint64_t used_data_blocks;
	uint64_t total_data_blocks;
	uint64_t held_metadata_root;
	uint32_t read_only;		/* metadata may not be changed */
	dm_thin_discards_t discards;
	uint32_t fail : 1;		/* all I/O fails */
	uint32_t error_if_no_space : 1;	/* otherwise queue_if_no_space */
	uint32_t out_of_data_space : 1;	/* metadata may be changed, but data may not be allocated (no rw) */
	uint32_t needs_check : 1;	/* metadata needs check */
	uint32_t error : 1;		/* detected error (switches to fail soon) */
	uint32_t reserved : 27;
};

int dm_get_status_thin_pool(struct dm_pool *mem, const char *params,
			    struct dm_status_thin_pool **status);
/* Parse params from STATUS call for thin target */
struct dm_status_thin {
	uint64_t mapped_sectors;
	uint64_t highest_mapped_sector;
	uint32_t fail : 1;		/* Thin volume fails I/O */
	uint32_t reserved : 31;
};

int dm_get_status_thin(struct dm_pool *mem, const char *params,
		       struct dm_status_thin **status);
/*
 * device-mapper statistics support
 */

/*
 * Statistics handle.
 *
 * Operations on dm_stats objects include managing statistics regions
 * and obtaining and manipulating current counter values from the
 * kernel.  Methods are provided to return basic count values and to
 * derive time-based metrics when a suitable interval estimate is
 * provided.
 *
 * Internally the dm_stats handle contains a pointer to a table of one
 * or more dm_stats_region objects representing the regions registered
 * with the dm_stats_create_region() method.  These in turn point to a
 * table of one or more dm_stats_counters objects containing the
 * counter sets for each defined area within the region:
 *
 * dm_stats->dm_stats_region[nr_regions]->dm_stats_counters[nr_areas]
 *
 * This structure is private to the library and may change in future
 * versions: all users should make use of the public interface and treat
 * the dm_stats type as an opaque handle.
 *
 * Regions and counter sets are stored in order of increasing region_id.
 * Depending on region specifications and the sequence of create and
 * delete operations this may not correspond to increasing sector
 * number: users of the library should not assume that this is the case
 * unless region creation is deliberately managed to ensure this (by
 * always creating regions in strict order of ascending sector address).
 *
 * Regions may also overlap so the same sector range may be included in
 * more than one region or area: applications should be prepared to deal
 * with this or manage regions such that it does not occur.
 */
struct dm_stats;
/*
 * Histogram handle.
 *
 * A histogram object represents the latency histogram values and bin
 * boundaries of the histogram associated with a particular area.
 *
 * Operations on the handle allow the number of bins, bin boundaries,
 * counts and relative proportions to be obtained as well as the
 * conversion of a histogram or its bounds to a compact string
 * representation.
 */
struct dm_histogram;
/*
 * Allocate a dm_stats handle to use for subsequent device-mapper
 * statistics operations.  A program_id may be specified and will be
 * used by default for subsequent operations on this handle.
 *
 * If program_id is NULL or the empty string a program_id will be
 * automatically set to the value contained in /proc/self/comm.
 */
struct dm_stats *dm_stats_create(const char *program_id);

/*
 * Bind a dm_stats handle to the specified device major and minor
 * values.  Any previous binding is cleared and any preexisting counter
 * data contained in the handle is released.
 */
int dm_stats_bind_devno(struct dm_stats *dms, int major, int minor);

/*
 * Bind a dm_stats handle to the specified device name.
 * Any previous binding is cleared and any preexisting counter
 * data contained in the handle is released.
 */
int dm_stats_bind_name(struct dm_stats *dms, const char *name);

/*
 * Bind a dm_stats handle to the specified device UUID.
 * Any previous binding is cleared and any preexisting counter
 * data contained in the handle is released.
 */
int dm_stats_bind_uuid(struct dm_stats *dms, const char *uuid);

/*
 * Bind a dm_stats handle to the device backing the file referenced
 * by the specified file descriptor.
 *
 * File descriptor fd must reference a regular file, open for reading,
 * in a local file system, backed by a device-mapper device, that
 * supports the FIEMAP ioctl, and that returns data describing the
 * physical location of extents.
 */
int dm_stats_bind_from_fd(struct dm_stats *dms, int fd);
/*
 * Test whether the running kernel supports the precise_timestamps
 * feature.  Presence of this feature also implies histogram support.
 * The library will check this call internally and fails any attempt
 * to use nanosecond counters or histograms on kernels that fail to
 * meet this check.
 */
int dm_message_supports_precise_timestamps(void);

/*
 * Precise timestamps and histogram support.
 *
 * Test for the presence of precise_timestamps and histogram support.
 */
int dm_stats_driver_supports_precise(void);
int dm_stats_driver_supports_histogram(void);

/*
 * Returns 1 if the specified region has the precise_timestamps feature
 * enabled (i.e. produces nanosecond-precision counter values) or 0 for
 * a region using the default millisecond precision.
 */
int dm_stats_get_region_precise_timestamps(const struct dm_stats *dms,
					   uint64_t region_id);

/*
 * Returns 1 if the region at the current cursor location has the
 * precise_timestamps feature enabled (i.e. produces
 * nanosecond-precision counter values) or 0 for a region using the
 * default millisecond precision.
 */
int dm_stats_get_current_region_precise_timestamps(const struct dm_stats *dms);
#define DM_STATS_ALL_PROGRAMS ""
/*
 * Parse the response from a @stats_list message.  dm_stats_list will
 * allocate the necessary dm_stats and dm_stats region structures from
 * the embedded dm_pool.  No counter data will be obtained (the counters
 * members of dm_stats_region objects are set to NULL).
 *
 * A program_id may optionally be supplied; if the argument is non-NULL
 * only regions with a matching program_id value will be considered.  If
 * the argument is NULL then the default program_id associated with the
 * dm_stats handle will be used.  Passing the special value
 * DM_STATS_ALL_PROGRAMS will cause all regions to be queried
 * regardless of region program_id.
 */
int dm_stats_list(struct dm_stats *dms, const char *program_id);

#define DM_STATS_REGIONS_ALL UINT64_MAX
/*
 * Populate a dm_stats object with statistics for one or more regions of
 * the specified device.
 *
 * A program_id may optionally be supplied; if the argument is non-NULL
 * only regions with a matching program_id value will be considered.  If
 * the argument is NULL then the default program_id associated with the
 * dm_stats handle will be used.  Passing the special value
 * DM_STATS_ALL_PROGRAMS will cause all regions to be queried
 * regardless of region program_id.
 *
 * Passing the special value DM_STATS_REGIONS_ALL as the region_id
 * argument will attempt to retrieve all regions selected by the
 * program_id argument.
 *
 * If region_id is used to request a single region_id to be populated
 * the program_id is ignored.
 */
int dm_stats_populate(struct dm_stats *dms, const char *program_id,
		      uint64_t region_id);
/*
* Create a new statistics region on the device bound to dms .
*
* start and len specify the region start and length in 512 b sectors .
* Passing zero for both start and len will create a region spanning
* the entire device .
*
* Step determines how to subdivide the region into discrete counter
* sets : a positive value specifies the size of areas into which the
* region should be split while a negative value will split the region
* into a number of areas equal to the absolute value of step :
*
* - a region with one area spanning the entire device :
*
* dm_stats_create_region ( dms , 0 , 0 , - 1 , p , a ) ;
*
* - a region with areas of 1 MiB :
*
* dm_stats_create_region ( dms , 0 , 0 , 1 < < 11 , p , a ) ;
*
* - one 1 MiB region starting at 1024 sectors with two areas :
*
* dm_stats_create_region ( dms , 1024 , 1 < < 11 , - 2 , p , a ) ;
*
2015-08-20 13:55:06 +03:00
* If precise is non - zero attempt to create a region with nanosecond
* precision counters using the kernel precise_timestamps feature .
*
2015-08-19 22:39:10 +03:00
* precise - A flag to request nanosecond precision counters
* to be used for this region .
*
* histogram_bounds - specify the boundaries of a latency histogram to
* be tracked for the region . The values are expressed as an array of
* uint64_t terminated with a zero . Values must be in order of ascending
* magnitude and specify the upper bounds of successive histogram bins
* in nanoseconds ( with an implicit lower bound of zero on the first bin
* and an implicit upper bound of infinity on the final bin ) . For
* example :
*
* uint64_t bounds_ary [ ] = { 1000 , 2000 , 3000 , 0 } ;
*
* Specifies a histogram with four bins : 0 - 1000 ns , 1000 - 2000 ns ,
* 2000 - 3000 ns and > 3000 ns .
*
* The smallest latency value that can be tracked for a region not using
* precise_timestamps is 1 ms : attempting to create a region with
* histogram boundaries < 1 ms will cause the precise_timestamps feature
* to be enabled for that region automatically if it was not requested
* explicitly .
*
2015-08-05 12:40:00 +03:00
* program_id is an optional string argument that identifies the
* program creating the region . If program_id is NULL or the empty
* string the default program_id stored in the handle will be used .
*
2016-07-05 17:17:44 +03:00
* user_data is an optional string argument that is added to the
* content of the aux_data field stored with the statistics region by
* the kernel .
*
* The library may also use this space internally , for example , to
* store a group descriptor or other metadata : in this case the
* library will strip any internal data fields from the value before
* it is returned via a call to dm_stats_get_region_aux_data ( ) .
*
* The user data stored is not accessed by the library or kernel and
* may be used to store an arbitrary data word ( embedded whitespace is
* not permitted ) .
*
* An application using both the library and direct access to the
* @ stats_list device - mapper message may see the internal values stored
* in this field by the library . In such cases any string up to and
* including the first ' # ' in the field must be treated as an opaque
* value and preserved across any external modification of aux_data