/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2006 Rackable Systems All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef LIB_DEVICE_MAPPER_H
#define LIB_DEVICE_MAPPER_H

#include <inttypes.h>
#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>

#ifdef __linux__
#  include <linux/types.h>
#endif

#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>

#ifndef __GNUC__
# define __typeof__ typeof
#endif
/* Macros to make string defines */
#define DM_TO_STRING_EXP(A) #A
#define DM_TO_STRING(A) DM_TO_STRING_EXP(A)

#define DM_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

#ifdef __cplusplus
extern "C" {
#endif
/*****************************************************************
 * The first section of this file provides direct access to the
 * individual device-mapper ioctls.  Since it is quite laborious to
 * build the ioctl arguments for the device-mapper, people are
 * encouraged to use this library.
 ****************************************************************/

/*
 * The library user may wish to register their own
 * logging function.  By default errors go to stderr.
 * Use dm_log_with_errno_init(NULL) to restore the default log fn.
 * Error messages may have a non-zero errno.
 * Debug messages may have a non-zero class.
 * Aborts on internal error when env DM_ABORT_ON_INTERNAL_ERRORS is 1
 */
typedef void (*dm_log_with_errno_fn) (int level, const char *file, int line,
				      int dm_errno_or_class, const char *f, ...)
    __attribute__ ((format(printf, 5, 6)));

void dm_log_with_errno_init(dm_log_with_errno_fn fn);
void dm_log_init_verbose(int level);

/*
 * Original version of this function.
 * dm_errno is set to 0.
 *
 * Deprecated: Use the _with_errno_ versions above instead.
 */
typedef void (*dm_log_fn) (int level, const char *file, int line,
			   const char *f, ...)
    __attribute__ ((format(printf, 4, 5)));

void dm_log_init(dm_log_fn fn);
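/*
 * Illustrative sketch only (not part of the API): registering an
 * errno-aware logging callback.  The function name and the level
 * filter below are arbitrary choices for this sketch.
 *
 *     static void my_log(int level, const char *file, int line,
 *                        int dm_errno_or_class, const char *f, ...)
 *     {
 *             va_list ap;
 *
 *             if (level > 4)                  // arbitrary threshold for this sketch
 *                     return;
 *             va_start(ap, f);
 *             vfprintf(stderr, f, ap);
 *             va_end(ap);
 *             fputc('\n', stderr);
 *     }
 *
 *     dm_log_with_errno_init(my_log);         // install the callback
 *     dm_log_with_errno_init(NULL);           // restore the default
 */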
/*
 * For backward-compatibility, indicate that dm_log_init() was used
 * to set a non-default value of dm_log().
 */
int dm_log_is_non_default(void);

/*
 * Number of devices currently in suspended state (via the library).
 */
int dm_get_suspended_counter(void);
enum {
	DM_DEVICE_CREATE,
	DM_DEVICE_RELOAD,
	DM_DEVICE_REMOVE,
	DM_DEVICE_REMOVE_ALL,

	DM_DEVICE_SUSPEND,
	DM_DEVICE_RESUME,

	DM_DEVICE_INFO,
	DM_DEVICE_DEPS,
	DM_DEVICE_RENAME,
	DM_DEVICE_VERSION,

	DM_DEVICE_STATUS,
	DM_DEVICE_TABLE,
	DM_DEVICE_WAITEVENT,
	DM_DEVICE_LIST,

	DM_DEVICE_CLEAR,

	DM_DEVICE_MKNODES,

	DM_DEVICE_LIST_VERSIONS,

	DM_DEVICE_TARGET_MSG,
	DM_DEVICE_SET_GEOMETRY
};
/*
 * You will need to build a struct dm_task for
 * each ioctl command you want to execute.
 */

struct dm_pool;
struct dm_task;
struct dm_timestamp;

struct dm_task *dm_task_create(int type);
void dm_task_destroy(struct dm_task *dmt);

int dm_task_set_name(struct dm_task *dmt, const char *name);
int dm_task_set_uuid(struct dm_task *dmt, const char *uuid);
/*
 * Retrieve attributes after an info.
 */
struct dm_info {
	int exists;
	int suspended;
	int live_table;
	int inactive_table;
	int32_t open_count;
	uint32_t event_nr;
	uint32_t major;
	uint32_t minor;		/* minor device number */
	int read_only;		/* 0:read-write; 1:read-only */

	int32_t target_count;

	int deferred_remove;
	int internal_suspend;
};
struct dm_deps {
	uint32_t count;
	uint32_t filler;
	uint64_t device[0];
};

struct dm_names {
	uint64_t dev;
	uint32_t next;		/* Offset to next struct from start of this struct */
	char name[0];
};

struct dm_versions {
	uint32_t next;		/* Offset to next struct from start of this struct */
	uint32_t version[3];

	char name[0];
};
int dm_get_library_version(char *version, size_t size);
int dm_task_get_driver_version(struct dm_task *dmt, char *version, size_t size);
int dm_task_get_info(struct dm_task *dmt, struct dm_info *dmi);
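/*
 * Illustrative sketch only: a typical create/run/inspect/destroy cycle
 * for an info ioctl.  The device name "vg0-lv0" below is hypothetical.
 *
 *     struct dm_task *dmt;
 *     struct dm_info info;
 *
 *     if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
 *             return 0;
 *     if (!dm_task_set_name(dmt, "vg0-lv0") ||
 *         !dm_task_run(dmt) ||
 *         !dm_task_get_info(dmt, &info) || !info.exists)
 *             goto out;
 *     printf("%" PRIu32 ":%" PRIu32 " open_count=%d\n",
 *            info.major, info.minor, (int) info.open_count);
 * out:
 *     dm_task_destroy(dmt);
 */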
/*
 * This function returns dm device's UUID based on the value
 * of the mangling mode set during preceding dm_task_run call:
 *   - unmangled UUID for DM_STRING_MANGLING_{AUTO, HEX},
 *   - UUID without any changes for DM_STRING_MANGLING_NONE.
 *
 * To get mangled or unmangled form of the UUID directly, use
 * dm_task_get_uuid_mangled or dm_task_get_uuid_unmangled function.
 */
const char *dm_task_get_uuid(const struct dm_task *dmt);

struct dm_deps *dm_task_get_deps(struct dm_task *dmt);
struct dm_versions *dm_task_get_versions(struct dm_task *dmt);
const char *dm_task_get_message_response(struct dm_task *dmt);
/*
 * These functions return device-mapper names based on the value
 * of the mangling mode set during preceding dm_task_run call:
 *   - unmangled name for DM_STRING_MANGLING_{AUTO, HEX},
 *   - name without any changes for DM_STRING_MANGLING_NONE.
 *
 * To get mangled or unmangled form of the name directly, use
 * dm_task_get_name_mangled or dm_task_get_name_unmangled function.
 */
const char *dm_task_get_name(const struct dm_task *dmt);
struct dm_names *dm_task_get_names(struct dm_task *dmt);
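/*
 * Illustrative sketch only: walking the list returned by a
 * DM_DEVICE_LIST task.  The "next" member is an offset from the start
 * of the current struct; a zero offset marks the last entry, and an
 * empty list is indicated by dev == 0 in the first entry.
 *
 *     struct dm_names *names = dm_task_get_names(dmt);
 *     unsigned next = 0;
 *
 *     if (names && names->dev)
 *             do {
 *                     names = (struct dm_names *)((char *) names + next);
 *                     printf("%s\n", names->name);
 *                     next = names->next;
 *             } while (next);
 */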
int dm_task_set_ro(struct dm_task *dmt);
int dm_task_set_newname(struct dm_task *dmt, const char *newname);
int dm_task_set_newuuid(struct dm_task *dmt, const char *newuuid);
int dm_task_set_minor(struct dm_task *dmt, int minor);
int dm_task_set_major(struct dm_task *dmt, int major);
int dm_task_set_major_minor(struct dm_task *dmt, int major, int minor, int allow_default_major_fallback);
int dm_task_set_uid(struct dm_task *dmt, uid_t uid);
int dm_task_set_gid(struct dm_task *dmt, gid_t gid);
int dm_task_set_mode(struct dm_task *dmt, mode_t mode);
/* See also description for DM_UDEV_DISABLE_LIBRARY_FALLBACK flag! */
int dm_task_set_cookie(struct dm_task *dmt, uint32_t *cookie, uint16_t flags);
int dm_task_set_event_nr(struct dm_task *dmt, uint32_t event_nr);
int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders, const char *heads, const char *sectors, const char *start);
int dm_task_set_message(struct dm_task *dmt, const char *message);
int dm_task_set_sector(struct dm_task *dmt, uint64_t sector);
int dm_task_no_flush(struct dm_task *dmt);
int dm_task_no_open_count(struct dm_task *dmt);
int dm_task_skip_lockfs(struct dm_task *dmt);
int dm_task_query_inactive_table(struct dm_task *dmt);
int dm_task_suppress_identical_reload(struct dm_task *dmt);
int dm_task_secure_data(struct dm_task *dmt);
int dm_task_retry_remove(struct dm_task *dmt);
int dm_task_deferred_remove(struct dm_task *dmt);

/*
 * Record timestamp immediately after the ioctl returns.
 */
int dm_task_set_record_timestamp(struct dm_task *dmt);
struct dm_timestamp *dm_task_get_ioctl_timestamp(struct dm_task *dmt);

/*
 * Enable checks for common mistakes such as issuing ioctls in an unsafe order.
 */
int dm_task_enable_checks(struct dm_task *dmt);

typedef enum {
	DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
	DM_ADD_NODE_ON_CREATE  /* add /dev/mapper node with dmsetup create */
} dm_add_node_t;
int dm_task_set_add_node(struct dm_task *dmt, dm_add_node_t add_node);
/*
 * Control read_ahead.
 */
#define DM_READ_AHEAD_AUTO UINT32_MAX	/* Use kernel default readahead */
#define DM_READ_AHEAD_NONE 0		/* Disable readahead */
#define DM_READ_AHEAD_MINIMUM_FLAG 0x1	/* Value supplied is minimum */

/*
 * Read ahead is set with DM_DEVICE_CREATE with a table or DM_DEVICE_RESUME.
 */
int dm_task_set_read_ahead(struct dm_task *dmt, uint32_t read_ahead,
			   uint32_t read_ahead_flags);
uint32_t dm_task_get_read_ahead(const struct dm_task *dmt,
				uint32_t *read_ahead);
/*
 * Use these to prepare for a create or reload.
 */
int dm_task_add_target(struct dm_task *dmt,
		       uint64_t start,
		       uint64_t size, const char *ttype, const char *params);
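/*
 * Illustrative sketch only: building a one-target table for a create.
 * The device name "sketch0" and the choice of the parameterless "zero"
 * target are arbitrary for this sketch; udev cookie handling is omitted.
 *
 *     struct dm_task *dmt;
 *     int r = 0;
 *
 *     if (!(dmt = dm_task_create(DM_DEVICE_CREATE)))
 *             return 0;
 *     if (dm_task_set_name(dmt, "sketch0") &&
 *         dm_task_add_target(dmt, 0, 2048, "zero", "") &&   // 1 MiB of zeroes
 *         dm_task_run(dmt))
 *             r = 1;
 *     dm_task_destroy(dmt);
 *     return r;
 */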
/*
 * Format major/minor numbers correctly for input to driver.
 */
#define DM_FORMAT_DEV_BUFSIZE	13	/* Minimum bufsize to handle worst case. */
int dm_format_dev(char *buf, int bufsize, uint32_t dev_major, uint32_t dev_minor);
/* Use this to retrieve target information returned from a STATUS call */
void *dm_get_next_target(struct dm_task *dmt,
			 void *next, uint64_t *start, uint64_t *length,
			 char **target_type, char **params);
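/*
 * Illustrative sketch only: iterating over the targets of a table
 * returned by a DM_DEVICE_STATUS or DM_DEVICE_TABLE task.
 *
 *     void *next = NULL;
 *     uint64_t start, length;
 *     char *target_type, *params;
 *
 *     do {
 *             next = dm_get_next_target(dmt, next, &start, &length,
 *                                        &target_type, &params);
 *             if (target_type)
 *                     printf("%" PRIu64 " %" PRIu64 " %s %s\n",
 *                            start, length, target_type, params);
 *     } while (next);
 */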
/*
 * The following dm_get_status_* functions allocate an appropriate status
 * structure from the passed mempool together with the necessary character
 * arrays.  Destroying the mempool releases all associated allocations.
 */

/* Parse params from STATUS call for mirror target */
typedef enum {
	DM_STATUS_MIRROR_ALIVE	      = 'A', /* No failures */
	DM_STATUS_MIRROR_FLUSH_FAILED = 'F', /* Mirror out-of-sync */
	DM_STATUS_MIRROR_WRITE_FAILED = 'D', /* Mirror out-of-sync */
	DM_STATUS_MIRROR_SYNC_FAILED  = 'S', /* Mirror out-of-sync */
	DM_STATUS_MIRROR_READ_FAILED  = 'R', /* Mirror data unaffected */
	DM_STATUS_MIRROR_UNCLASSIFIED = 'U'  /* Bug */
} dm_status_mirror_health_t;

struct dm_status_mirror {
	uint64_t total_regions;
	uint64_t insync_regions;
	uint32_t dev_count;		/* # of devs[] elements (<= 8) */
	struct {
		dm_status_mirror_health_t health;
		uint32_t major;
		uint32_t minor;
	} *devs;			/* array with individual legs */
	const char *log_type;		/* core, disk,.... */
	uint32_t log_count;		/* # of logs[] elements */
	struct {
		dm_status_mirror_health_t health;
		uint32_t major;
		uint32_t minor;
	} *logs;			/* array with individual logs */
};
int dm_get_status_mirror(struct dm_pool *mem, const char *params,
			 struct dm_status_mirror **status);

/* Parse params from STATUS call for raid target */
struct dm_status_raid {
	uint64_t reserved;
	uint64_t total_regions;		/* sectors */
	uint64_t insync_regions;	/* sectors */
	uint64_t mismatch_count;
	uint32_t dev_count;
	char *raid_type;
	/* A - alive, a - alive not in-sync, D - dead/failed */
	char *dev_health;
	/* idle, frozen, resync, recover, check, repair */
	char *sync_action;
	uint64_t data_offset;		/* RAID out-of-place reshaping */
};
int dm_get_status_raid(struct dm_pool *mem, const char *params,
		       struct dm_status_raid **status);
/* Parse params from STATUS call for cache target */
struct dm_status_cache {
	uint64_t version;		/* zero for now */

	uint32_t metadata_block_size;	/* in 512B sectors */
	uint32_t block_size;		/* AKA 'chunk_size' */

	uint64_t metadata_used_blocks;
	uint64_t metadata_total_blocks;

	uint64_t used_blocks;
	uint64_t dirty_blocks;
	uint64_t total_blocks;

	uint64_t read_hits;
	uint64_t read_misses;
	uint64_t write_hits;
	uint64_t write_misses;

	uint64_t demotions;
	uint64_t promotions;

	uint64_t feature_flags;		/* DM_CACHE_FEATURE_? */

	int core_argc;
	char **core_argv;

	char *policy_name;
	int policy_argc;
	char **policy_argv;

	unsigned error : 1;		/* detected error (switches to fail soon) */
	unsigned fail : 1;		/* all I/O fails */
	unsigned needs_check : 1;	/* metadata needs check */
	unsigned read_only : 1;		/* metadata may not be changed */
	uint32_t reserved : 28;
};
int dm_get_status_cache(struct dm_pool *mem, const char *params,
			struct dm_status_cache **status);
/*
 * Parse params from STATUS call for snapshot target
 *
 * Snapshot target's format:
 * <= 1.7.0: <used_sectors>/<total_sectors>
 * >= 1.8.0: <used_sectors>/<total_sectors> <metadata_sectors>
 */
struct dm_status_snapshot {
	uint64_t used_sectors;		/* in 512b units */
	uint64_t total_sectors;
	uint64_t metadata_sectors;
	unsigned has_metadata_sectors : 1; /* set when metadata_sectors is present */
	unsigned invalid : 1;		/* set when snapshot is invalidated */
	unsigned merge_failed : 1;	/* set when snapshot merge failed */
	unsigned overflow : 1;		/* set when snapshot overflows */
};
int dm_get_status_snapshot(struct dm_pool *mem, const char *params,
			   struct dm_status_snapshot **status);
/* Parse params from STATUS call for thin_pool target */
typedef enum {
	DM_THIN_DISCARDS_IGNORE,
	DM_THIN_DISCARDS_NO_PASSDOWN,
	DM_THIN_DISCARDS_PASSDOWN
} dm_thin_discards_t;

struct dm_status_thin_pool {
	uint64_t transaction_id;
	uint64_t used_metadata_blocks;
	uint64_t total_metadata_blocks;
	uint64_t used_data_blocks;
	uint64_t total_data_blocks;
	uint64_t held_metadata_root;
	uint32_t read_only;		/* metadata may not be changed */
	dm_thin_discards_t discards;
	uint32_t fail : 1;		/* all I/O fails */
	uint32_t error_if_no_space : 1;	/* otherwise queue_if_no_space */
	uint32_t out_of_data_space : 1;	/* metadata may be changed, but data may not be allocated (no rw) */
	uint32_t needs_check : 1;	/* metadata needs check */
	uint32_t error : 1;		/* detected error (switches to fail soon) */
	uint32_t reserved : 27;
};
int dm_get_status_thin_pool(struct dm_pool *mem, const char *params,
			    struct dm_status_thin_pool **status);
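/*
 * Illustrative sketch only: parsing thin-pool status params obtained
 * from a DM_DEVICE_STATUS task.  The mempool name and chunk hint passed
 * to dm_pool_create() are arbitrary choices for this sketch.
 *
 *     struct dm_pool *mem;
 *     struct dm_status_thin_pool *tps;
 *
 *     if (!(mem = dm_pool_create("status", 512)))
 *             return 0;
 *     if (dm_get_status_thin_pool(mem, params, &tps))
 *             printf("%" PRIu64 "/%" PRIu64 " data blocks used\n",
 *                    tps->used_data_blocks, tps->total_data_blocks);
 *     dm_pool_destroy(mem);           // releases tps as well
 */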
/* Parse params from STATUS call for thin target */
struct dm_status_thin {
	uint64_t mapped_sectors;
	uint64_t highest_mapped_sector;
	uint32_t fail : 1;		/* Thin volume fails I/O */
	uint32_t reserved : 31;
};
int dm_get_status_thin(struct dm_pool *mem, const char *params,
		       struct dm_status_thin **status);
/*
 * device-mapper statistics support
 */

/*
 * Statistics handle.
 *
 * Operations on dm_stats objects include managing statistics regions
 * and obtaining and manipulating current counter values from the
 * kernel. Methods are provided to return basic count values and to
 * derive time-based metrics when a suitable interval estimate is
 * provided.
 *
 * Internally the dm_stats handle contains a pointer to a table of one
 * or more dm_stats_region objects representing the regions registered
 * with the dm_stats_create_region() method. These in turn point to a
 * table of one or more dm_stats_counters objects containing the
 * counter sets for each defined area within the region:
 *
 * dm_stats->dm_stats_region[nr_regions]->dm_stats_counters[nr_areas]
 *
 * This structure is private to the library and may change in future
 * versions: all users should make use of the public interface and treat
 * the dm_stats type as an opaque handle.
 *
 * Regions and counter sets are stored in order of increasing region_id.
 * Depending on region specifications and the sequence of create and
 * delete operations this may not correspond to increasing sector
 * number: users of the library should not assume that this is the case
 * unless region creation is deliberately managed to ensure this (by
 * always creating regions in strict order of ascending sector address).
 *
 * Regions may also overlap so the same sector range may be included in
 * more than one region or area: applications should be prepared to deal
 * with this or manage regions such that it does not occur.
 */
struct dm_stats;

/*
 * Histogram handle.
 *
 * A histogram object represents the latency histogram values and bin
 * boundaries of the histogram associated with a particular area.
 *
 * Operations on the handle allow the number of bins, bin boundaries,
 * counts and relative proportions to be obtained as well as the
 * conversion of a histogram or its bounds to a compact string
 * representation.
 */
struct dm_histogram;
/*
 * Allocate a dm_stats handle to use for subsequent device-mapper
 * statistics operations. A program_id may be specified and will be
 * used by default for subsequent operations on this handle.
 *
 * If program_id is NULL or the empty string a program_id will be
 * automatically set to the value contained in /proc/self/comm.
 */
struct dm_stats *dm_stats_create(const char *program_id);

/*
 * Bind a dm_stats handle to the specified device major and minor
 * values. Any previous binding is cleared and any preexisting counter
 * data contained in the handle is released.
 */
int dm_stats_bind_devno(struct dm_stats *dms, int major, int minor);

/*
 * Bind a dm_stats handle to the specified device name.
 * Any previous binding is cleared and any preexisting counter
 * data contained in the handle is released.
 */
int dm_stats_bind_name(struct dm_stats *dms, const char *name);

/*
 * Bind a dm_stats handle to the specified device UUID.
 * Any previous binding is cleared and any preexisting counter
 * data contained in the handle is released.
 */
int dm_stats_bind_uuid(struct dm_stats *dms, const char *uuid);

/*
 * Bind a dm_stats handle to the device backing the file referenced
 * by the specified file descriptor.
 *
 * File descriptor fd must reference a regular file, open for reading,
 * in a local file system, backed by a device-mapper device, that
 * supports the FIEMAP ioctl, and that returns data describing the
 * physical location of extents.
 */
int dm_stats_bind_from_fd(struct dm_stats *dms, int fd);
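/*
 * Illustrative sketch only: allocating a stats handle and binding it by
 * device name.  The program_id "mytool" and device name "vg0-lv0" are
 * hypothetical.
 *
 *     struct dm_stats *dms;
 *
 *     if (!(dms = dm_stats_create("mytool")))
 *             return 0;
 *     if (!dm_stats_bind_name(dms, "vg0-lv0")) {
 *             dm_stats_destroy(dms);
 *             return 0;
 *     }
 */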
/*
 * Test whether the running kernel supports the precise_timestamps
 * feature. Presence of this feature also implies histogram support.
 * The library will check this call internally and fails any attempt
 * to use nanosecond counters or histograms on kernels that fail to
 * meet this check.
 */
int dm_message_supports_precise_timestamps(void);

/*
 * Precise timestamps and histogram support.
 *
 * Test for the presence of precise_timestamps and histogram support.
 */
int dm_stats_driver_supports_precise(void);
int dm_stats_driver_supports_histogram(void);

/*
 * Returns 1 if the specified region has the precise_timestamps feature
 * enabled (i.e. produces nanosecond-precision counter values) or 0 for
 * a region using the default millisecond precision.
 */
int dm_stats_get_region_precise_timestamps(const struct dm_stats *dms,
					   uint64_t region_id);

/*
 * Returns 1 if the region at the current cursor location has the
 * precise_timestamps feature enabled (i.e. produces
 * nanosecond-precision counter values) or 0 for a region using the
 * default millisecond precision.
 */
int dm_stats_get_current_region_precise_timestamps(const struct dm_stats *dms);
#define DM_STATS_ALL_PROGRAMS ""
/*
 * Parse the response from a @stats_list message. dm_stats_list will
 * allocate the necessary dm_stats and dm_stats region structures from
 * the embedded dm_pool. No counter data will be obtained (the counters
 * members of dm_stats_region objects are set to NULL).
 *
 * A program_id may optionally be supplied; if the argument is non-NULL
 * only regions with a matching program_id value will be considered. If
 * the argument is NULL then the default program_id associated with the
 * dm_stats handle will be used. Passing the special value
 * DM_STATS_ALL_PROGRAMS will cause all regions to be queried
 * regardless of region program_id.
 */
int dm_stats_list(struct dm_stats *dms, const char *program_id);

#define DM_STATS_REGIONS_ALL UINT64_MAX
/*
 * Populate a dm_stats object with statistics for one or more regions of
 * the specified device.
 *
 * A program_id may optionally be supplied; if the argument is non-NULL
 * only regions with a matching program_id value will be considered. If
 * the argument is NULL then the default program_id associated with the
 * dm_stats handle will be used. Passing the special value
 * DM_STATS_ALL_PROGRAMS will cause all regions to be queried
 * regardless of region program_id.
 *
 * Passing the special value DM_STATS_REGIONS_ALL as the region_id
 * argument will attempt to retrieve all regions selected by the
 * program_id argument.
 *
 * If region_id is used to request a single region_id to be populated
 * the program_id is ignored.
 */
int dm_stats_populate(struct dm_stats *dms, const char *program_id,
		      uint64_t region_id);
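/*
 * Illustrative sketch only: listing all regions regardless of
 * program_id and then populating counter data for every region.
 *
 *     if (!dm_stats_list(dms, DM_STATS_ALL_PROGRAMS))
 *             return 0;               // no regions, or the message failed
 *     if (!dm_stats_populate(dms, DM_STATS_ALL_PROGRAMS,
 *                            DM_STATS_REGIONS_ALL))
 *             return 0;
 */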
/*
 * Create a new statistics region on the device bound to dms.
 *
 * start and len specify the region start and length in 512b sectors.
 * Passing zero for both start and len will create a region spanning
 * the entire device.
 *
 * Step determines how to subdivide the region into discrete counter
 * sets: a positive value specifies the size of areas into which the
 * region should be split while a negative value will split the region
 * into a number of areas equal to the absolute value of step:
 *
 * - a region with one area spanning the entire device:
 *
 *   dm_stats_create_region(dms, 0, 0, -1, p, a);
 *
 * - a region with areas of 1MiB:
 *
 *   dm_stats_create_region(dms, 0, 0, 1 << 11, p, a);
 *
 * - one 1MiB region starting at 1024 sectors with two areas:
 *
 *   dm_stats_create_region(dms, 1024, 1 << 11, -2, p, a);
 *
 * If precise is non-zero attempt to create a region with nanosecond
 * precision counters using the kernel precise_timestamps feature.
 *
 * precise - A flag to request nanosecond precision counters
 * to be used for this region.
 *
 * histogram_bounds - specify the boundaries of a latency histogram to
 * be tracked for the region. The values are expressed as an array of
 * uint64_t terminated with a zero. Values must be in order of ascending
 * magnitude and specify the upper bounds of successive histogram bins
 * in nanoseconds (with an implicit lower bound of zero on the first bin
 * and an implicit upper bound of infinity on the final bin). For
 * example:
 *
 *   uint64_t bounds_ary[] = { 1000, 2000, 3000, 0 };
 *
 * Specifies a histogram with four bins: 0-1000ns, 1000-2000ns,
 * 2000-3000ns and >3000ns.
 *
 * The smallest latency value that can be tracked for a region not using
 * precise_timestamps is 1ms: attempting to create a region with
 * histogram boundaries < 1ms will cause the precise_timestamps feature
 * to be enabled for that region automatically if it was not requested
 * explicitly.
 *
 * program_id is an optional string argument that identifies the
 * program creating the region. If program_id is NULL or the empty
 * string the default program_id stored in the handle will be used.
 *
 * user_data is an optional string argument that is added to the
 * content of the aux_data field stored with the statistics region by
 * the kernel.
 *
 * The library may also use this space internally, for example, to
 * store a group descriptor or other metadata: in this case the
 * library will strip any internal data fields from the value before
 * it is returned via a call to dm_stats_get_region_aux_data().
 *
 * The user data stored is not accessed by the library or kernel and
 * may be used to store an arbitrary data word (embedded whitespace is
 * not permitted).
 *
 * An application using both the library and direct access to the
 * @stats_list device-mapper message may see the internal values stored
 * in this field by the library. In such cases any string up to and
 * including the first '#' in the field must be treated as an opaque
 * value and preserved across any external modification of aux_data.
 *
 * The region_id of the newly-created region is returned in *region_id
 * if it is non-NULL.
 */
int dm_stats_create_region(struct dm_stats *dms, uint64_t *region_id,
			   uint64_t start, uint64_t len, int64_t step,
			   int precise, struct dm_histogram *bounds,
			   const char *program_id, const char *user_data);
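/*
 * Illustrative sketch only: one region covering the whole device, split
 * into 1MiB areas, with default precision and no histogram.  Passing
 * NULL for program_id and user_data to fall back to the defaults is an
 * assumption based on the description above.
 *
 *     uint64_t region_id;
 *
 *     if (!dm_stats_create_region(dms, &region_id, 0, 0, 1 << 11,
 *                                 0, NULL, NULL, NULL))
 *             return 0;               // region creation failed
 */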
/*
 * Delete the specified statistics region. This will also mark the
 * region as not-present and discard any existing statistics data.
 */
int dm_stats_delete_region(struct dm_stats *dms, uint64_t region_id);

/*
 * Clear the specified statistics region. This requests the kernel to
 * zero all counter values (except in-flight I/O). Note that this
 * operation is not atomic with respect to reads of the counters; any IO
 * events occurring between the last print operation and the clear will
 * be lost. This can be avoided by using the atomic print-and-clear
 * function of the dm_stats_print_region() call or by using the higher
 * level dm_stats_populate() interface.
 */
int dm_stats_clear_region(struct dm_stats *dms, uint64_t region_id);