mirror of
git://sourceware.org/git/lvm2.git
synced 2024-10-05 03:49:50 +03:00
39b7d1ba8f
Collection of typos in code comments. Should have no runtime effect.
5189 lines
128 KiB
C
/*
|
|
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
|
|
*
|
|
* _stats_get_extents_for_file() based in part on filefrag_fiemap() from
|
|
* e2fsprogs/misc/filefrag.c. Copyright 2003 by Theodore Ts'o.
|
|
*
|
|
* This file is part of the device-mapper userspace tools.
|
|
*
|
|
* This copyrighted material is made available to anyone wishing to use,
|
|
* modify, copy, or redistribute it subject to the terms and conditions
|
|
* of the GNU Lesser General Public License v.2.1.
|
|
*
|
|
* You should have received a copy of the GNU Lesser General Public License
|
|
* along with this program; if not, write to the Free Software Foundation,
|
|
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
*/
|
|
|
|
#include "libdm/misc/dmlib.h"
|
|
#include "libdm/misc/kdev_t.h"
|
|
|
|
#include "math.h" /* log10() */
|
|
|
|
#include <sys/sysmacros.h>
|
|
#include <sys/ioctl.h>
|
|
#include <sys/vfs.h> /* fstatfs */
|
|
|
|
#ifdef __linux__
|
|
#include <linux/fs.h> /* FS_IOC_FIEMAP */
|
|
#endif
|
|
|
|
#ifdef HAVE_LINUX_FIEMAP_H
|
|
#include <linux/fiemap.h> /* fiemap */
|
|
#endif
|
|
|
|
#ifdef HAVE_LINUX_MAGIC_H
|
|
#include <linux/magic.h> /* BTRFS_SUPER_MAGIC */
|
|
#endif
|
|
|
|
#define DM_STATS_REGION_NOT_PRESENT UINT64_MAX
|
|
#define DM_STATS_GROUP_NOT_PRESENT DM_STATS_GROUP_NONE
|
|
|
|
#define NSEC_PER_USEC 1000L
|
|
#define NSEC_PER_MSEC 1000000L
|
|
#define NSEC_PER_SEC 1000000000L
|
|
|
|
#define PRECISE_ARG "precise_timestamps"
|
|
#define HISTOGRAM_ARG "histogram:"
|
|
|
|
#define STATS_ROW_BUF_LEN 4096
|
|
#define STATS_MSG_BUF_LEN 1024
|
|
#define STATS_FIE_BUF_LEN 2048
|
|
|
|
#define SECTOR_SHIFT 9L
|
|
|
|
/* Histogram bin: counts I/Os whose latency falls at or below 'upper'
 * (and above the previous bin's upper bound). */
struct dm_histogram_bin {
	uint64_t upper; /* Upper bound on this bin. */
	uint64_t count; /* Count value for this bin. */
};
|
|
|
|
/*
 * A latency histogram: a table of bins plus back-pointers to the
 * stats handle and region the histogram describes.
 */
struct dm_histogram {
	/* The stats handle this histogram belongs to. */
	const struct dm_stats *dms;
	/* The region this histogram belongs to. */
	const struct dm_stats_region *region;
	uint64_t sum; /* Sum of histogram bin counts. */
	int nr_bins; /* Number of histogram bins assigned. */
	/* Bin table (C99 flexible array member). */
	struct dm_histogram_bin bins[];
};
|
|
|
|
/*
 * See Documentation/device-mapper/statistics.txt for full descriptions
 * of the device-mapper statistics counter fields.
 */
struct dm_stats_counters {
	uint64_t reads; /* Num reads completed */
	uint64_t reads_merged; /* Num reads merged */
	uint64_t read_sectors; /* Num sectors read */
	/*
	 * NOTE(review): the *_nsecs fields below carry comments saying
	 * "milliseconds" but are named nanoseconds; the parsed values
	 * appear to be scaled by the per-region timescale (1 for
	 * precise_timestamps regions, NSEC_PER_MSEC otherwise) —
	 * confirm the stored unit before relying on either claim.
	 */
	uint64_t read_nsecs; /* Num milliseconds spent reading */
	uint64_t writes; /* Num writes completed */
	uint64_t writes_merged; /* Num writes merged */
	uint64_t write_sectors; /* Num sectors written */
	uint64_t write_nsecs; /* Num milliseconds spent writing */
	uint64_t io_in_progress; /* Num I/Os currently in progress */
	uint64_t io_nsecs; /* Num milliseconds spent doing I/Os */
	uint64_t weighted_io_nsecs; /* Weighted num milliseconds doing I/Os */
	uint64_t total_read_nsecs; /* Total time spent reading in milliseconds */
	uint64_t total_write_nsecs; /* Total time spent writing in milliseconds */
	struct dm_histogram *histogram; /* Histogram. */
};
|
|
|
|
/* One slot of the regions table: a single stats region. */
struct dm_stats_region {
	uint64_t region_id; /* as returned by @stats_list */
	uint64_t group_id; /* owning group, or DM_STATS_GROUP_NOT_PRESENT */
	uint64_t start; /* region start sector */
	uint64_t len; /* region length in sectors */
	uint64_t step; /* area size within the region */
	char *program_id; /* dm_malloc'd; freed in _stats_region_destroy */
	char *aux_data; /* dm_malloc'd user data (group tag stripped) */
	uint64_t timescale; /* precise_timestamps is per-region */
	struct dm_histogram *bounds; /* histogram configuration */
	struct dm_histogram *histogram; /* aggregate cache */
	struct dm_stats_counters *counters; /* per-area counter table */
};
|
|
|
|
/* A group of regions: a bitset of member region_ids plus metadata. */
struct dm_stats_group {
	uint64_t group_id; /* equals the first member region_id */
	const char *alias; /* optional group name (dm_malloc'd) */
	dm_bitset_t regions; /* member region_ids */
	struct dm_histogram *histogram; /* aggregate histogram cache */
};
|
|
|
|
/* A device-mapper statistics handle. */
struct dm_stats {
	/* device binding */
	int bind_major; /* device major that this dm_stats object is bound to */
	int bind_minor; /* device minor that this dm_stats object is bound to */
	char *bind_name; /* device-mapper device name */
	char *bind_uuid; /* device-mapper UUID */
	char *program_id; /* default program_id for this handle */
	const char *name; /* cached device_name used for reporting */
	struct dm_pool *mem; /* memory pool for region and counter tables */
	struct dm_pool *hist_mem; /* separate pool for histogram tables */
	struct dm_pool *group_mem; /* separate pool for group tables */
	uint64_t nr_regions; /* total number of present regions */
	uint64_t max_region; /* size of the regions table */
	uint64_t interval_ns; /* sampling interval in nanoseconds */
	uint64_t timescale; /* default sample value multiplier */
	int precise; /* use precise_timestamps when creating regions */
	struct dm_stats_region *regions; /* table indexed by region_id */
	struct dm_stats_group *groups; /* table parallel to regions */
	/* statistics cursor */
	uint64_t walk_flags; /* walk control flags */
	uint64_t cur_flags; /* flags for the walk in progress */
	uint64_t cur_group; /* group_id at the cursor position */
	uint64_t cur_region; /* region_id at the cursor position */
	uint64_t cur_area; /* area index at the cursor position */
};
|
|
|
|
/*
 * Return a new dm_malloc()ed copy of aux_data with characters that are
 * significant to aux_data parsing (' ', '\t', and the escape character
 * '\\' itself) preceded by a backslash.
 *
 * Each input character expands to at most two output characters, so a
 * buffer of 2 * len + 1 bytes always suffices (the previous code
 * over-allocated 3 * len + 1 and used a redundant sizeof(char)).
 *
 * Returns NULL on allocation failure; the caller frees the result with
 * dm_free().
 */
static char *_stats_escape_aux_data(const char *aux_data)
{
	size_t aux_data_len = strlen(aux_data);
	/* Worst case: every character escaped -> two bytes each, plus NUL. */
	char *escaped = dm_malloc(2 * aux_data_len + 1);
	size_t index = 0, i;

	if (!escaped) {
		log_error("Could not allocate memory for escaped "
			  "aux_data string.");
		return NULL;
	}

	for (i = 0; i < aux_data_len; i++) {
		switch (aux_data[i]) {
		case ' ':
		case '\\':
		case '\t':
			/* Prefix characters special to aux_data parsing. */
			escaped[index++] = '\\';
			/* fallthrough */
		default:
			escaped[index++] = aux_data[i];
		}
	}

	escaped[index] = '\0';
	return escaped;
}
|
|
|
|
#define PROC_SELF_COMM "/proc/self/comm"
|
|
static char *_program_id_from_proc(void)
|
|
{
|
|
FILE *comm = NULL;
|
|
char buf[STATS_ROW_BUF_LEN];
|
|
|
|
if (!(comm = fopen(PROC_SELF_COMM, "r")))
|
|
return_NULL;
|
|
|
|
if (!fgets(buf, sizeof(buf), comm)) {
|
|
log_error("Could not read from %s", PROC_SELF_COMM);
|
|
if (fclose(comm))
|
|
stack;
|
|
return NULL;
|
|
}
|
|
|
|
if (fclose(comm))
|
|
stack;
|
|
|
|
return dm_strdup(buf);
|
|
}
|
|
|
|
/*
 * Number of areas a region of 'len' sectors is divided into when using
 * an area size of 'step' sectors.
 *
 * drivers/md/dm-stats.c::message_stats_create(): a region may be
 * sub-divided into areas with their own counters; any partial area at
 * the end of the region counts as an additional complete area. A zero
 * length or step yields a single area.
 */
static uint64_t _nr_areas(uint64_t len, uint64_t step)
{
	return (!len || !step) ? 1 : (len + step - 1) / step;
}
|
|
|
|
static uint64_t _nr_areas_region(struct dm_stats_region *region)
|
|
{
|
|
return _nr_areas(region->len, region->step);
|
|
}
|
|
|
|
/*
 * Allocate and initialise a new dm_stats handle.
 *
 * If program_id is NULL or empty, a default is taken from
 * /proc/self/comm. The handle is returned unbound: callers must bind
 * it with one of the dm_stats_bind_*() calls before issuing messages.
 *
 * Returns NULL on allocation failure.
 */
struct dm_stats *dm_stats_create(const char *program_id)
{
	size_t hist_hint = sizeof(struct dm_histogram_bin);
	size_t group_hint = sizeof(struct dm_stats_group);
	struct dm_stats *dms = NULL;

	if (!(dms = dm_zalloc(sizeof(*dms))))
		return_NULL;

	/* FIXME: better hint. */
	if (!(dms->mem = dm_pool_create("stats_pool", 4096))) {
		dm_free(dms);
		return_NULL;
	}

	if (!(dms->hist_mem = dm_pool_create("histogram_pool", hist_hint)))
		goto_bad;

	if (!(dms->group_mem = dm_pool_create("group_pool", group_hint)))
		goto_bad;

	/* Fall back to the process name when no program_id is given. */
	if (!program_id || !strlen(program_id))
		dms->program_id = _program_id_from_proc();
	else
		dms->program_id = dm_strdup(program_id);

	if (!dms->program_id) {
		log_error("Could not allocate memory for program_id");
		goto bad;
	}

	/* Unbound: no device major/minor, name, or uuid yet. */
	dms->bind_major = -1;
	dms->bind_minor = -1;
	dms->bind_name = NULL;
	dms->bind_uuid = NULL;

	dms->name = NULL;

	/* by default all regions use msec precision */
	dms->timescale = NSEC_PER_MSEC;
	dms->precise = 0;

	/* No regions known until a @stats_list response is parsed. */
	dms->nr_regions = DM_STATS_REGION_NOT_PRESENT;
	dms->max_region = DM_STATS_REGION_NOT_PRESENT;
	dms->regions = NULL;

	/* maintain compatibility with earlier walk version */
	dms->walk_flags = dms->cur_flags = DM_STATS_WALK_DEFAULT;

	return dms;

bad:
	/* hist_mem/group_mem may not have been created yet. */
	dm_pool_destroy(dms->mem);
	if (dms->hist_mem)
		dm_pool_destroy(dms->hist_mem);
	if (dms->group_mem)
		dm_pool_destroy(dms->group_mem);
	dm_free(dms);
	return NULL;
}
|
|
|
|
/*
|
|
* Test whether the stats region pointed to by region is present.
|
|
*/
|
|
static int _stats_region_present(const struct dm_stats_region *region)
|
|
{
|
|
return !(region->region_id == DM_STATS_REGION_NOT_PRESENT);
|
|
}
|
|
|
|
/*
|
|
* Test whether the stats group pointed to by group is present.
|
|
*/
|
|
static int _stats_group_present(const struct dm_stats_group *group)
|
|
{
|
|
return !(group->group_id == DM_STATS_GROUP_NOT_PRESENT);
|
|
}
|
|
|
|
/*
|
|
* Test whether a stats group id is present.
|
|
*/
|
|
static int _stats_group_id_present(const struct dm_stats *dms, uint64_t id)
|
|
{
|
|
struct dm_stats_group *group = NULL;
|
|
|
|
if (id == DM_STATS_GROUP_NOT_PRESENT)
|
|
return 0;
|
|
|
|
if (!dms)
|
|
return_0;
|
|
|
|
if (!dms->regions)
|
|
return 0;
|
|
|
|
if (id > dms->max_region)
|
|
return 0;
|
|
|
|
group = &dms->groups[id];
|
|
|
|
return _stats_group_present(group);
|
|
}
|
|
|
|
/*
|
|
* Test whether the given region_id is a member of any group.
|
|
*/
|
|
static uint64_t _stats_region_is_grouped(const struct dm_stats* dms,
|
|
uint64_t region_id)
|
|
{
|
|
uint64_t group_id;
|
|
|
|
if (region_id == DM_STATS_GROUP_NOT_PRESENT)
|
|
return 0;
|
|
|
|
if (!_stats_region_present(&dms->regions[region_id]))
|
|
return 0;
|
|
|
|
group_id = dms->regions[region_id].group_id;
|
|
|
|
return group_id != DM_STATS_GROUP_NOT_PRESENT;
|
|
}
|
|
|
|
static void _stats_histograms_destroy(struct dm_pool *mem,
|
|
struct dm_stats_region *region)
|
|
{
|
|
/* Unpopulated handle. */
|
|
if (!region->counters)
|
|
return;
|
|
|
|
/*
|
|
* Free everything in the pool back to the first histogram.
|
|
*/
|
|
if (region->counters[0].histogram)
|
|
dm_pool_free(mem, region->counters[0].histogram);
|
|
}
|
|
|
|
/*
 * Reset a regions-table slot to the "not present" state, freeing the
 * dm_malloc'd per-region strings. Pool-allocated data (counters,
 * histogram bounds) is deliberately not freed here.
 */
static void _stats_region_destroy(struct dm_stats_region *region)
{
	if (!_stats_region_present(region))
		return;

	region->start = region->len = region->step = 0;
	region->timescale = 0;

	/*
	 * Don't free counters and histogram bounds here: they are
	 * dropped from the pool along with the corresponding
	 * regions table.
	 *
	 * The following objects are all allocated with dm_malloc.
	 */

	region->counters = NULL;
	region->bounds = NULL;

	dm_free(region->program_id);
	region->program_id = NULL;
	dm_free(region->aux_data);
	region->aux_data = NULL;
	/* Mark the slot free. */
	region->region_id = DM_STATS_REGION_NOT_PRESENT;
}
|
|
|
|
/*
 * Free the entire regions table and all per-region allocations.
 */
static void _stats_regions_destroy(struct dm_stats *dms)
{
	struct dm_pool *mem = dms->mem;
	uint64_t i;

	if (!dms->regions)
		return;

	/*
	 * walk backwards to obey pool order; the loop terminates when
	 * the unsigned index wraps from 0 to UINT64_MAX, which equals
	 * the DM_STATS_REGION_NOT_PRESENT sentinel.
	 */
	for (i = dms->max_region; (i != DM_STATS_REGION_NOT_PRESENT); i--) {
		_stats_histograms_destroy(dms->hist_mem, &dms->regions[i]);
		_stats_region_destroy(&dms->regions[i]);
	}

	dm_pool_free(mem, dms->regions);
	dms->regions = NULL;
}
|
|
|
|
static void _stats_group_destroy(struct dm_stats_group *group)
|
|
{
|
|
if (!_stats_group_present(group))
|
|
return;
|
|
|
|
group->histogram = NULL;
|
|
|
|
if (group->alias) {
|
|
dm_free((char *) group->alias);
|
|
group->alias = NULL;
|
|
}
|
|
if (group->regions) {
|
|
dm_bitset_destroy(group->regions);
|
|
group->regions = NULL;
|
|
}
|
|
group->group_id = DM_STATS_GROUP_NOT_PRESENT;
|
|
}
|
|
|
|
/*
 * Free the entire groups table and all per-group allocations.
 */
static void _stats_groups_destroy(struct dm_stats *dms)
{
	uint64_t i;

	if (!dms->groups)
		return;

	/* Walk backwards (pool order); stops on unsigned wraparound. */
	for (i = dms->max_region; (i != DM_STATS_REGION_NOT_PRESENT); i--)
		_stats_group_destroy(&dms->groups[i]);

	dm_pool_free(dms->group_mem, dms->groups);
	dms->groups = NULL;
}
|
|
|
|
static int _set_stats_device(struct dm_stats *dms, struct dm_task *dmt)
|
|
{
|
|
if (dms->bind_name)
|
|
return dm_task_set_name(dmt, dms->bind_name);
|
|
if (dms->bind_uuid)
|
|
return dm_task_set_uuid(dmt, dms->bind_uuid);
|
|
if (dms->bind_major > 0)
|
|
return dm_task_set_major(dmt, dms->bind_major)
|
|
&& dm_task_set_minor(dmt, dms->bind_minor);
|
|
return_0;
|
|
}
|
|
|
|
static int _stats_bound(const struct dm_stats *dms)
|
|
{
|
|
if (dms->bind_major > 0 || dms->bind_name || dms->bind_uuid)
|
|
return 1;
|
|
/* %p format specifier expects a void pointer. */
|
|
log_error("Stats handle at %p is not bound.", (const void *)dms);
|
|
return 0;
|
|
}
|
|
|
|
static void _stats_clear_binding(struct dm_stats *dms)
|
|
{
|
|
if (dms->bind_name)
|
|
dm_pool_free(dms->mem, dms->bind_name);
|
|
if (dms->bind_uuid)
|
|
dm_pool_free(dms->mem, dms->bind_uuid);
|
|
dm_free((char *) dms->name);
|
|
|
|
dms->bind_name = dms->bind_uuid = NULL;
|
|
dms->bind_major = dms->bind_minor = -1;
|
|
dms->name = NULL;
|
|
}
|
|
|
|
int dm_stats_bind_devno(struct dm_stats *dms, int major, int minor)
|
|
{
|
|
_stats_clear_binding(dms);
|
|
_stats_regions_destroy(dms);
|
|
_stats_groups_destroy(dms);
|
|
|
|
dms->bind_major = major;
|
|
dms->bind_minor = minor;
|
|
|
|
return 1;
|
|
}
|
|
|
|
int dm_stats_bind_name(struct dm_stats *dms, const char *name)
|
|
{
|
|
_stats_clear_binding(dms);
|
|
_stats_regions_destroy(dms);
|
|
_stats_groups_destroy(dms);
|
|
|
|
if (!(dms->bind_name = dm_pool_strdup(dms->mem, name)))
|
|
return_0;
|
|
|
|
return 1;
|
|
}
|
|
|
|
int dm_stats_bind_uuid(struct dm_stats *dms, const char *uuid)
|
|
{
|
|
_stats_clear_binding(dms);
|
|
_stats_regions_destroy(dms);
|
|
_stats_groups_destroy(dms);
|
|
|
|
if (!(dms->bind_uuid = dm_pool_strdup(dms->mem, uuid)))
|
|
return_0;
|
|
|
|
return 1;
|
|
}
|
|
|
|
int dm_stats_bind_from_fd(struct dm_stats *dms, int fd)
|
|
{
|
|
int major, minor;
|
|
struct stat buf;
|
|
|
|
if (fstat(fd, &buf)) {
|
|
log_error("fstat failed for fd %d.", fd);
|
|
return 0;
|
|
}
|
|
|
|
major = (int) MAJOR(buf.st_dev);
|
|
minor = (int) MINOR(buf.st_dev);
|
|
|
|
if (!dm_stats_bind_devno(dms, major, minor))
|
|
return_0;
|
|
return 1;
|
|
}
|
|
|
|
static int _stats_check_precise_timestamps(const struct dm_stats *dms)
|
|
{
|
|
/* Already checked? */
|
|
if (dms && dms->precise)
|
|
return 1;
|
|
|
|
return dm_message_supports_precise_timestamps();
|
|
}
|
|
|
|
int dm_stats_driver_supports_precise(void)
|
|
{
|
|
return _stats_check_precise_timestamps(NULL);
|
|
}
|
|
|
|
/*
 * Report whether the kernel driver supports latency histograms.
 *
 * NOTE(review): this deliberately reuses the precise_timestamps
 * check — presumably the two driver features ship together; confirm
 * against the kernel dm-stats version history.
 */
int dm_stats_driver_supports_histogram(void)
{
	return _stats_check_precise_timestamps(NULL);
}
|
|
|
|
static int _fill_hist_arg(char *hist_arg, size_t hist_len, uint64_t scale,
|
|
struct dm_histogram *bounds)
|
|
{
|
|
int i, l, len = 0, nr_bins;
|
|
char *arg = hist_arg;
|
|
uint64_t value;
|
|
|
|
nr_bins = bounds->nr_bins;
|
|
|
|
for (i = 0; i < nr_bins; i++) {
|
|
value = bounds->bins[i].upper / scale;
|
|
if ((l = dm_snprintf(arg, hist_len - len, FMTu64"%s", value,
|
|
(i == (nr_bins - 1)) ? "" : ",")) < 0)
|
|
return_0;
|
|
len += l;
|
|
arg += l;
|
|
}
|
|
return 1;
|
|
}
|
|
|
|
/*
 * Compute the buffer size needed to format the histogram boundary
 * argument for 'bounds' (each bound scaled down by 'scale') and
 * return a zeroed dm_zalloc()ed buffer of that size, storing the
 * size in *len. Returns NULL on allocation failure.
 */
static void *_get_hist_arg(struct dm_histogram *bounds, uint64_t scale,
			   size_t *len)
{
	struct dm_histogram_bin *entry, *bins;
	size_t hist_len = 1; /* terminating '\0' */
	double value;

	entry = bins = bounds->bins;

	/* Walk bins last-to-first, adding the decimal digit count of
	 * each scaled bound (log10 rounded) plus one. */
	entry += bounds->nr_bins - 1;
	while(entry >= bins) {
		value = (double) (entry--)->upper;
		/* Use lround to avoid size_t -> double cast warning. */
		hist_len += 1 + (size_t) lround(log10(value / scale));
		/*
		 * Allow for a ',' separator. NOTE(review): this tests
		 * entry after the decrement, so the separator count can
		 * be one high on the final iteration — a harmless
		 * over-allocation of one byte.
		 */
		if (entry != bins)
			hist_len++; /* ',' */
	}

	*len = hist_len;

	return dm_zalloc(hist_len);
}
|
|
|
|
/*
 * Build the comma separated histogram boundary argument string for
 * 'bounds', returning a dm_malloc()ed string (caller frees with
 * dm_free()).
 *
 * Validates that the bounds are in increasing order and sets *precise
 * when any boundary requires sub-millisecond precision; values are
 * emitted in ms unless *precise is set. Returns NULL on error.
 */
static char *_build_histogram_arg(struct dm_histogram *bounds, int *precise)
{
	struct dm_histogram_bin *entry, *bins;
	size_t hist_len;
	char *hist_arg;
	uint64_t scale;

	entry = bins = bounds->bins;

	/* Empty histogram is invalid. */
	if (!bounds->nr_bins) {
		log_error("Cannot format empty histogram description.");
		return NULL;
	}

	/* Validate entries and set *precise if precision < 1ms. */
	entry += bounds->nr_bins - 1;
	while (entry >= bins) {
		if (entry != bins) {
			if (entry->upper < (entry - 1)->upper) {
				log_error("Histogram boundaries must be in "
					  "order of increasing magnitude.");
				return 0;
			}
		}

		/*
		 * Only enable precise_timestamps automatically if any
		 * value in the histogram bounds uses precision < 1ms.
		 */
		if (((entry--)->upper % NSEC_PER_MSEC) && !*precise)
			*precise = 1;
	}

	/* Boundary values are stored in ns; emit ms unless precise. */
	scale = (*precise) ? 1 : NSEC_PER_MSEC;

	/* Calculate hist_len and allocate a character buffer. */
	if (!(hist_arg = _get_hist_arg(bounds, scale, &hist_len))) {
		log_error("Could not allocate memory for histogram argument.");
		return 0;
	}

	/* Fill hist_arg with boundary strings. */
	if (!_fill_hist_arg(hist_arg, hist_len, scale, bounds))
		goto_bad;

	return hist_arg;

bad:
	log_error("Could not build histogram arguments.");
	dm_free(hist_arg);

	return NULL;
}
|
|
|
|
static struct dm_task *_stats_send_message(struct dm_stats *dms, char *msg)
|
|
{
|
|
struct dm_task *dmt;
|
|
|
|
if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
|
|
return_0;
|
|
|
|
if (!_set_stats_device(dms, dmt))
|
|
goto_bad;
|
|
|
|
if (!dm_task_set_message(dmt, msg))
|
|
goto_bad;
|
|
|
|
if (!dm_task_run(dmt))
|
|
goto_bad;
|
|
|
|
return dmt;
|
|
|
|
bad:
|
|
dm_task_destroy(dmt);
|
|
return NULL;
|
|
}
|
|
|
|
/*
 * Cache the dm device_name for the device bound to dms, obtained via
 * a DM_DEVICE_INFO task. A no-op if a name is already cached.
 */
static int _stats_set_name_cache(struct dm_stats *dms)
{
	struct dm_task *dmt;

	if (dms->name)
		return 1;

	if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
		return_0;

	if (!_set_stats_device(dms, dmt))
		goto_bad;

	if (!dm_task_run(dmt))
		goto_bad;

	/* Duplicate the name: the task is destroyed below. */
	if (!(dms->name = dm_strdup(dm_task_get_name(dmt))))
		goto_bad;

	dm_task_destroy(dmt);

	return 1;

bad:
	log_error("Could not retrieve device-mapper name for device.");
	dm_task_destroy(dmt);
	return 0;
}
|
|
|
|
/*
|
|
* update region group_id values
|
|
*/
|
|
static void _stats_update_groups(struct dm_stats *dms)
|
|
{
|
|
struct dm_stats_group *group;
|
|
uint64_t group_id, i;
|
|
|
|
for (group_id = 0; group_id < dms->max_region + 1; group_id++) {
|
|
if (!_stats_group_id_present(dms, group_id))
|
|
continue;
|
|
|
|
group = &dms->groups[group_id];
|
|
|
|
for (i = dm_bit_get_first(group->regions);
|
|
i != DM_STATS_GROUP_NOT_PRESENT;
|
|
i = dm_bit_get_next(group->regions, i))
|
|
dms->regions[i].group_id = group_id;
|
|
}
|
|
}
|
|
|
|
/*
 * Validate a group descriptor against the regions table: warn about,
 * and remove, any member region_id that is not actually present.
 */
static void _check_group_regions_present(struct dm_stats *dms,
					 struct dm_stats_group *group)
{
	dm_bitset_t regions = group->regions;
	int64_t i, group_id;

	/* By convention the group_id equals the first member region. */
	group_id = i = dm_bit_get_first(regions);

	/*
	 * NOTE(review): 'i > 0' also handles dm_bit_get_first()
	 * returning a negative value for an empty set, but it skips
	 * checking region_id 0 — presumably region 0 can only be the
	 * group leader itself here; confirm.
	 */
	for (; i > 0; i = dm_bit_get_next(regions, i))
		if (!_stats_region_present(&dms->regions[i])) {
			log_warn("Group descriptor " FMTd64 " contains "
				 "non-existent region_id " FMTd64 ".",
				 group_id, i);
			dm_bit_clear(regions, i);
		}
}
|
|
|
|
/*
|
|
* Parse a DMS_GROUP group descriptor embedded in a region's aux_data.
|
|
*
|
|
* DMS_GROUP="ALIAS:MEMBERS"
|
|
*
|
|
* ALIAS: group alias
|
|
* MEMBERS: list of group member region ids.
|
|
*
|
|
*/
|
|
#define DMS_GROUP_TAG "DMS_GROUP="
|
|
#define DMS_GROUP_TAG_LEN (sizeof(DMS_GROUP_TAG) - 1)
|
|
#define DMS_GROUP_SEP ':'
|
|
#define DMS_AUX_SEP "#"
|
|
#define DMS_AUX_SEP_CHAR '#'
|
|
#define DMS_GROUP_QUOTE '"'
|
|
|
|
/*
 * Parse a DMS_GROUP descriptor embedded in region->aux_data into
 * *group, and strip the tag so aux_data retains only the user data.
 *
 * Returns 1 on success (including the "no group tag" case, which
 * leaves *group not-present) and 0 on a malformed descriptor or
 * allocation failure.
 */
static int _parse_aux_data_group(struct dm_stats *dms,
				 struct dm_stats_region *region,
				 struct dm_stats_group *group)
{
	char *alias, *c, *end;
	dm_bitset_t regions;

	memset(group, 0, sizeof(*group));
	group->group_id = DM_STATS_GROUP_NOT_PRESENT;

	/* find start of group tag */
	c = strstr(region->aux_data, DMS_GROUP_TAG);
	if (!c)
		return 1; /* no group is not an error */

	/* extract alias from quotes */
	alias = c + strlen(DMS_GROUP_TAG) + 1;

	c = strchr(c, DMS_GROUP_SEP);

	if (!c) {
		log_error("Found malformed group tag while reading aux_data");
		return 0;
	}

	/* terminate alias and advance to members accounting for closing quote */
	*(c - 1) = '\0';
	c++;

	log_debug("Read alias '%s' from aux_data", alias);

	if (!c) {
		log_error("Found malformed group descriptor while "
			  "reading aux_data, expected '%c'", DMS_GROUP_SEP);
		return 0;
	}

	/* if user aux_data follows make sure we have a terminated
	 * string to pass to dm_bitset_parse_list().
	 */
	end = strstr(c, DMS_AUX_SEP);
	if (!end)
		end = c + strlen(c);
	*(end++) = '\0';

	if (!(regions = dm_bitset_parse_list(c, NULL, 0))) {
		log_error("Could not parse member list while "
			  "reading group aux_data");
		return 0;
	}

	/* A group's id must equal its first (lowest) member region_id. */
	group->group_id = dm_bit_get_first(regions);
	if (group->group_id != region->region_id) {
		log_error("Found invalid group descriptor in region " FMTu64
			  " aux_data.", region->region_id);
		group->group_id = DM_STATS_GROUP_NOT_PRESENT;
		goto bad;
	}

	group->regions = regions;
	group->alias = NULL;
	if (strlen(alias)) {
		group->alias = dm_strdup(alias);
		if (!group->alias) {
			log_error("Could not allocate memory for group alias");
			goto bad;
		}
	}

	/* separate group tag from user aux_data; "-" denotes empty */
	if ((strlen(end) > 1) || strncmp(end, "-", 1))
		c = dm_strdup(end);
	else
		c = dm_strdup("");

	if (!c) {
		log_error("Could not allocate memory for user aux_data");
		goto bad_alias;
	}

	/* Replace aux_data with the user portion only. */
	dm_free(region->aux_data);
	region->aux_data = c;

	log_debug("Found group_id " FMTu64 ": alias=\"%s\"", group->group_id,
		  (group->alias) ? group->alias : "");

	return 1;

bad_alias:
	dm_free((char *) group->alias);
bad:
	dm_bitset_destroy(regions);
	return 0;
}
|
|
|
|
/*
|
|
* Parse a histogram specification returned by the kernel in a
|
|
* @stats_list response.
|
|
*/
|
|
static int _stats_parse_histogram_spec(struct dm_stats *dms,
|
|
struct dm_stats_region *region,
|
|
const char *histogram)
|
|
{
|
|
const char valid_chars[] = "0123456789,";
|
|
uint64_t scale = region->timescale, this_val = 0;
|
|
struct dm_pool *mem = dms->hist_mem;
|
|
struct dm_histogram_bin cur;
|
|
struct dm_histogram hist = { 0 };
|
|
int nr_bins = 1;
|
|
const char *c, *v, *val_start;
|
|
char *p, *endptr = NULL;
|
|
|
|
/* Advance past "histogram:". */
|
|
histogram = strchr(histogram, ':');
|
|
if (!histogram) {
|
|
log_error("Could not parse histogram description.");
|
|
return 0;
|
|
}
|
|
histogram++;
|
|
|
|
/* @stats_list rows are newline terminated. */
|
|
if ((p = strchr(histogram, '\n')))
|
|
*p = '\0';
|
|
|
|
if (!dm_pool_begin_object(mem, sizeof(cur)))
|
|
return_0;
|
|
|
|
hist.nr_bins = 0; /* fix later */
|
|
hist.region = region;
|
|
hist.dms = dms;
|
|
|
|
if (!dm_pool_grow_object(mem, &hist, sizeof(hist)))
|
|
goto_bad;
|
|
|
|
c = histogram;
|
|
do {
|
|
for (v = valid_chars; *v; v++)
|
|
if (*c == *v)
|
|
break;
|
|
if (!*v) {
|
|
stack;
|
|
goto badchar;
|
|
}
|
|
|
|
if (*c == ',') {
|
|
log_error("Invalid histogram description: %s",
|
|
histogram);
|
|
goto bad;
|
|
} else {
|
|
val_start = c;
|
|
endptr = NULL;
|
|
|
|
errno = 0;
|
|
this_val = strtoull(val_start, &endptr, 10);
|
|
if (errno || !endptr) {
|
|
log_error("Could not parse histogram boundary.");
|
|
goto bad;
|
|
}
|
|
|
|
c = endptr; /* Advance to units, comma, or end. */
|
|
|
|
if (*c == ',')
|
|
c++;
|
|
else if (*c || (*c == ' ')) { /* Expected ',' or NULL. */
|
|
stack;
|
|
goto badchar;
|
|
}
|
|
|
|
if (*c == ',')
|
|
c++;
|
|
|
|
cur.upper = scale * this_val;
|
|
cur.count = 0;
|
|
|
|
if (!dm_pool_grow_object(mem, &cur, sizeof(cur)))
|
|
goto_bad;
|
|
|
|
nr_bins++;
|
|
}
|
|
} while (*c && (*c != ' '));
|
|
|
|
/* final upper bound. */
|
|
cur.upper = UINT64_MAX;
|
|
if (!dm_pool_grow_object(mem, &cur, sizeof(cur)))
|
|
goto_bad;
|
|
|
|
region->bounds = dm_pool_end_object(mem);
|
|
|
|
if (!region->bounds)
|
|
return_0;
|
|
|
|
region->bounds->nr_bins = nr_bins;
|
|
|
|
log_debug("Added region histogram spec with %d entries.", nr_bins);
|
|
return 1;
|
|
|
|
badchar:
|
|
log_error("Invalid character in histogram: '%c' (0x%x)", *c, *c);
|
|
bad:
|
|
dm_pool_abandon_object(mem);
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Split the string data portion of a @stats_list row into its
 * program_id, aux_data and stats_args fields.
 *
 * String data format:
 *   <program_id> <aux_data> [precise_timestamps] [histogram:n1,n2,..]
 *
 * The returned pointers alias string_data (which is modified in
 * place) or a shared empty string; "-" fields map to "". Always
 * returns 1.
 */
static int _stats_parse_string_data(char *string_data, char **program_id,
				    char **aux_data, char **stats_args)
{
	char *p, *next_gap, *empty_string = (char *)"";
	size_t len;

	/*
	 * String data format:
	 * <program_id> <aux_data> [precise_timestamps] [histogram:n1,n2,n3,..]
	 */

	/* Remove trailing whitespace */
	len = strlen(string_data);
	if (len > 0 && (string_data)[len - 1] == '\n') {
		(string_data)[len - 1] = '\0';
	}
	p = strchr(string_data, ' ');
	*program_id = string_data;
	if (!p) {
		/* Only a program_id present. */
		*aux_data = *stats_args = empty_string;
		return 1;
	}

	*p = '\0';

	p++;
	if (strstr(p, DMS_GROUP_TAG)) {
		*aux_data = p;
		/* Skip over the group tag: look for the next gap only
		 * after the '#' aux separator, presumably because the
		 * group portion may itself contain (escaped) spaces —
		 * see _stats_escape_aux_data. */
		if ((next_gap = strchr(p, DMS_AUX_SEP_CHAR)))
			next_gap = strchr(next_gap, ' ');
		if (next_gap) {
			*(next_gap++) = '\0';
			*stats_args = next_gap++;
		} else
			*stats_args = empty_string;
	} else {
		/* Plain aux_data: the next space separates stats args. */
		next_gap = strchr(p, ' ');
		if (next_gap) {
			*next_gap = '\0';
			*aux_data = p;
			*stats_args = next_gap + 1;
		} else {
			*aux_data = p;
			*stats_args = empty_string;
		}
	}

	/* "-" denotes an empty field in the kernel's row format. */
	if (!strncmp(*program_id, "-", 1))
		*program_id = empty_string;

	if (!strncmp(*aux_data, "-", 1))
		*aux_data = empty_string;

	return 1;
}
|
|
|
|
/*
 * Parse one row of a @stats_list response into *region.
 *
 * Returns 1 on success. On failure returns 0 with no allocations
 * left behind (program_id is freed if aux_data duplication fails).
 */
static int _stats_parse_list_region(struct dm_stats *dms,
				    struct dm_stats_region *region, char *line)
{
	char string_data[STATS_ROW_BUF_LEN] = { 0 };
	char *p, *program_id, *aux_data, *stats_args;
	int r;

	/*
	 * Parse fixed fields, line format:
	 *
	 * <region_id>: <start_sector>+<length> <step> <string data>
	 *
	 * Maximum string data size is 4096 - 1 bytes.
	 */
	r = sscanf(line, FMTu64 ": " FMTu64 "+" FMTu64 " " FMTu64 " %4095c",
		   &region->region_id, &region->start, &region->len,
		   &region->step, string_data);

	if (r != 5) {
		return 0;
	}

	if (!_stats_parse_string_data(string_data, &program_id, &aux_data, &stats_args)) {
		return_0;
	}

	/* precise_timestamps regions need no scaling (timescale 1). */
	region->timescale = strstr(stats_args, PRECISE_ARG) ? 1 : NSEC_PER_MSEC;

	p = strstr(stats_args, HISTOGRAM_ARG);
	if (p) {
		if (!_stats_parse_histogram_spec(dms, region, p)) {
			return_0;
		}
	} else {
		region->bounds = NULL;
	}

	/* Counter data arrives later, via @stats_print. */
	region->histogram = NULL;
	region->group_id = DM_STATS_GROUP_NOT_PRESENT;

	if (!(region->program_id = dm_strdup(program_id))) {
		return_0;
	}

	if (!(region->aux_data = dm_strdup(aux_data))) {
		dm_free(region->program_id);
		return_0;
	}

	region->counters = NULL;
	return 1;
}
|
|
|
|
/*
 * Parse an entire @stats_list response into the handle's regions and
 * groups tables. Gaps in the region_id sequence are filled with
 * "not present" placeholder entries so both tables can be indexed
 * directly by region_id.
 */
static int _stats_parse_list(struct dm_stats *dms, const char *resp)
{
	uint64_t max_region = 0, nr_regions = 0;
	struct dm_stats_region cur, fill;
	struct dm_stats_group cur_group;
	struct dm_pool *mem = dms->mem, *group_mem = dms->group_mem;
	char line[STATS_ROW_BUF_LEN];
	FILE *list_rows;

	if (!resp) {
		log_error("Could not parse NULL @stats_list response.");
		return 0;
	}

	_stats_regions_destroy(dms);
	_stats_groups_destroy(dms);

	/* no regions */
	if (!strlen(resp)) {
		dms->nr_regions = dms->max_region = 0;
		dms->regions = NULL;
		return 1;
	}

	/*
	 * dm_task_get_message_response() returns a 'const char *' but
	 * since fmemopen also permits "w" it expects a 'char *'.
	 */
	/* coverity[alloc_strlen] intentional */
	if (!(list_rows = fmemopen((char *)resp, strlen(resp), "r")))
		return_0;

	/* begin region table */
	if (!dm_pool_begin_object(mem, 1024))
		goto_bad;

	/* begin group table */
	if (!dm_pool_begin_object(group_mem, 32))
		goto_bad;

	while(fgets(line, sizeof(line), list_rows)) {

		cur_group.group_id = DM_STATS_GROUP_NOT_PRESENT;
		cur_group.regions = NULL;
		cur_group.alias = NULL;

		if (!_stats_parse_list_region(dms, &cur, line))
			goto_bad;

		/* handle holes in the list of region_ids */
		if (cur.region_id > max_region) {
			memset(&fill, 0, sizeof(fill));
			memset(&cur_group, 0, sizeof(cur_group));
			fill.region_id = DM_STATS_REGION_NOT_PRESENT;
			cur_group.group_id = DM_STATS_GROUP_NOT_PRESENT;
			do {
				if (!dm_pool_grow_object(mem, &fill, sizeof(fill)))
					goto_bad;
				if (!dm_pool_grow_object(group_mem, &cur_group,
							 sizeof(cur_group)))
					goto_bad;
			} while (max_region++ < (cur.region_id - 1));
		}

		/* A failed group parse is non-fatal: the region is
		 * still listed, just without group membership. */
		if (cur.aux_data)
			if (!_parse_aux_data_group(dms, &cur, &cur_group))
				log_error("Failed to parse group descriptor "
					  "from region_id " FMTu64 " aux_data:"
					  "'%s'", cur.region_id, cur.aux_data);
				/* continue */

		if (!dm_pool_grow_object(mem, &cur, sizeof(cur)))
			goto_bad;

		if (!dm_pool_grow_object(group_mem, &cur_group,
					 sizeof(cur_group)))
			goto_bad;

		max_region++;
		nr_regions++;
	}

	if (!nr_regions)
		/* no region data read from @stats_list */
		goto bad;

	dms->nr_regions = nr_regions;
	dms->max_region = max_region - 1;
	dms->regions = dm_pool_end_object(mem);
	dms->groups = dm_pool_end_object(group_mem);

	/* Drop group members that refer to missing regions. */
	dm_stats_foreach_group(dms)
		_check_group_regions_present(dms, &dms->groups[dms->cur_group]);

	_stats_update_groups(dms);

	if (fclose(list_rows))
		stack;

	return 1;

bad:
	if (fclose(list_rows))
		stack;
	dm_pool_abandon_object(mem);
	dm_pool_abandon_object(group_mem);

	return 0;
}
|
|
|
|
int dm_stats_list(struct dm_stats *dms, const char *program_id)
|
|
{
|
|
char msg[STATS_MSG_BUF_LEN];
|
|
struct dm_task *dmt;
|
|
int r;
|
|
|
|
if (!_stats_bound(dms))
|
|
return_0;
|
|
|
|
/* allow zero-length program_id for list */
|
|
if (!program_id)
|
|
program_id = dms->program_id;
|
|
|
|
if (!_stats_set_name_cache(dms))
|
|
return_0;
|
|
|
|
if (dms->regions)
|
|
_stats_regions_destroy(dms);
|
|
|
|
r = dm_snprintf(msg, sizeof(msg), "@stats_list %s", program_id);
|
|
|
|
if (r < 0) {
|
|
log_error("Failed to prepare stats message.");
|
|
return 0;
|
|
}
|
|
|
|
if (!(dmt = _stats_send_message(dms, msg)))
|
|
return_0;
|
|
|
|
if (!_stats_parse_list(dms, dm_task_get_message_response(dmt))) {
|
|
log_error("Could not parse @stats_list response.");
|
|
goto bad;
|
|
}
|
|
|
|
dm_task_destroy(dmt);
|
|
return 1;
|
|
|
|
bad:
|
|
dm_task_destroy(dmt);
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Parse histogram data returned from a @stats_print operation.
|
|
*/
|
|
static int _stats_parse_histogram(struct dm_pool *mem, char *hist_str,
|
|
struct dm_histogram **histogram,
|
|
struct dm_stats_region *region)
|
|
{
|
|
const char valid_chars[] = "0123456789:";
|
|
struct dm_histogram *bounds = region->bounds;
|
|
struct dm_histogram hist = {
|
|
.nr_bins = region->bounds->nr_bins
|
|
};
|
|
const char *c, *v, *val_start;
|
|
struct dm_histogram_bin cur;
|
|
uint64_t sum = 0, this_val;
|
|
char *endptr = NULL;
|
|
int bin = 0;
|
|
|
|
c = hist_str;
|
|
|
|
if (!dm_pool_begin_object(mem, sizeof(cur)))
|
|
return_0;
|
|
|
|
if (!dm_pool_grow_object(mem, &hist, sizeof(hist)))
|
|
goto_bad;
|
|
|
|
do {
|
|
memset(&cur, 0, sizeof(cur));
|
|
for (v = valid_chars; *v; v++)
|
|
if (*c == *v)
|
|
break;
|
|
if (!*v)
|
|
goto badchar;
|
|
|
|
if (*c == ',')
|
|
goto badchar;
|
|
else {
|
|
val_start = c;
|
|
endptr = NULL;
|
|
|
|
errno = 0;
|
|
this_val = strtoull(val_start, &endptr, 10);
|
|
if (errno || !endptr) {
|
|
log_error("Could not parse histogram value.");
|
|
goto bad;
|
|
}
|
|
c = endptr; /* Advance to colon, or end. */
|
|
|
|
if (*c == ':')
|
|
c++;
|
|
else if (*c & (*c != '\n'))
|
|
/* Expected ':', '\n', or NULL. */
|
|
goto badchar;
|
|
|
|
if (*c == ':')
|
|
c++;
|
|
|
|
cur.upper = bounds->bins[bin].upper;
|
|
cur.count = this_val;
|
|
sum += this_val;
|
|
|
|
if (!dm_pool_grow_object(mem, &cur, sizeof(cur)))
|
|
goto_bad;
|
|
|
|
bin++;
|
|
}
|
|
} while (*c && (*c != '\n'));
|
|
|
|
log_debug("Added region histogram data with %d entries.", hist.nr_bins);
|
|
|
|
*histogram = dm_pool_end_object(mem);
|
|
(*histogram)->sum = sum;
|
|
|
|
return 1;
|
|
|
|
badchar:
|
|
log_error("Invalid character in histogram data: '%c' (0x%x)", *c, *c);
|
|
bad:
|
|
dm_pool_abandon_object(mem);
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Parse one region's worth of @stats_print response text into
 * region->counters (one dm_stats_counters entry per area), setting
 * region->start, region->step, region->len and region->timescale
 * as a side effect. Histogram data, when region->bounds is set, is
 * parsed into a separate pool via _stats_parse_histogram().
 *
 * Returns 1 on success and 0 on failure.
 */
static int _stats_parse_region(struct dm_stats *dms, const char *resp,
			       struct dm_stats_region *region,
			       uint64_t timescale)
{
	struct dm_histogram *hist = NULL;
	struct dm_pool *mem = dms->mem;
	struct dm_stats_counters cur;
	FILE *stats_rows = NULL;
	uint64_t start = 0, len = 0;
	char row[STATS_ROW_BUF_LEN];
	int r;

	if (!resp) {
		log_error("Could not parse empty @stats_print response.");
		return 0;
	}

	/* Sentinel: set on the first row parsed below. */
	region->start = UINT64_MAX;

	if (!dm_pool_begin_object(mem, 512))
		goto_bad;

	/*
	 * dm_task_get_message_response() returns a 'const char *' but
	 * since fmemopen also permits "w" it expects a 'char *'.
	 */
	/* coverity[alloc_strlen] intentional */
	stats_rows = fmemopen((char *)resp, strlen(resp), "r");
	if (!stats_rows)
		goto_bad;

	/*
	 * Output format for each step-sized area of a region:
	 *
	 * <start_sector>+<length> counters
	 *
	 * The first 11 counters have the same meaning as
	 * /sys/block/ * /stat or /proc/diskstats.
	 *
	 * Please refer to Documentation/iostats.txt for details.
	 *
	 * 1. the number of reads completed
	 * 2. the number of reads merged
	 * 3. the number of sectors read
	 * 4. the number of milliseconds spent reading
	 * 5. the number of writes completed
	 * 6. the number of writes merged
	 * 7. the number of sectors written
	 * 8. the number of milliseconds spent writing
	 * 9. the number of I/Os currently in progress
	 * 10. the number of milliseconds spent doing I/Os
	 * 11. the weighted number of milliseconds spent doing I/Os
	 *
	 * Additional counters:
	 * 12. the total time spent reading in milliseconds
	 * 13. the total time spent writing in milliseconds
	 *
	 */
	while (fgets(row, sizeof(row), stats_rows)) {
		r = sscanf(row, FMTu64 "+" FMTu64 /* start+len */
			   /* reads */
			   FMTu64 " " FMTu64 " " FMTu64 " " FMTu64 " "
			   /* writes */
			   FMTu64 " " FMTu64 " " FMTu64 " " FMTu64 " "
			   /* in flight & io nsecs */
			   FMTu64 " " FMTu64 " " FMTu64 " "
			   /* tot read/write nsecs */
			   FMTu64 " " FMTu64, &start, &len,
			   &cur.reads, &cur.reads_merged, &cur.read_sectors,
			   &cur.read_nsecs,
			   &cur.writes, &cur.writes_merged, &cur.write_sectors,
			   &cur.write_nsecs,
			   &cur.io_in_progress,
			   &cur.io_nsecs, &cur.weighted_io_nsecs,
			   &cur.total_read_nsecs, &cur.total_write_nsecs);
		/* all 15 fields (start, len, 13 counters) must match */
		if (r != 15) {
			log_error("Could not parse @stats_print row.");
			goto bad;
		}

		/* scale time values up if needed */
		if (timescale != 1) {
			cur.read_nsecs *= timescale;
			cur.write_nsecs *= timescale;
			cur.io_nsecs *= timescale;
			cur.weighted_io_nsecs *= timescale;
			cur.total_read_nsecs *= timescale;
			cur.total_write_nsecs *= timescale;
		}

		if (region->bounds) {
			/* Find first histogram separator. */
			char *hist_str = strchr(row, ':');
			if (!hist_str) {
				log_error("Could not parse histogram value.");
				goto bad;
			}
			/* Find space preceding histogram. */
			while (hist_str && *(hist_str - 1) != ' ')
				hist_str--;

			/* Use a separate pool for histogram objects since we
			 * are growing the area table and each area's histogram
			 * table simultaneously.
			 */
			if (!_stats_parse_histogram(dms->hist_mem, hist_str,
						    &hist, region))
				goto_bad;
			hist->dms = dms;
			hist->region = region;
		}

		cur.histogram = hist;

		if (!dm_pool_grow_object(mem, &cur, sizeof(cur)))
			goto_bad;

		/* First row establishes the region origin and area size. */
		if (region->start == UINT64_MAX) {
			region->start = start;
			region->step = len; /* area size is always uniform. */
		}
	}

	if (region->start == UINT64_MAX)
		/* no area data read from @stats_print */
		goto bad;

	/* start/len still hold the values from the final row parsed. */
	region->len = (start + len) - region->start;
	region->timescale = timescale;
	region->counters = dm_pool_end_object(mem);

	if (fclose(stats_rows))
		stack;

	return 1;

bad:
	if (stats_rows)
		if (fclose(stats_rows))
			stack;
	dm_pool_abandon_object(mem);

	return 0;
}
|
|
|
|
/*
 * Advance the walk cursor to the next present object.
 *
 * If an area walk is active, first try to step to the next area of the
 * current region. Once the current region's areas are exhausted (or if
 * no area walk is active) advance *cur_r to the next region that is
 * present and not excluded by DM_STATS_WALK_SKIP_SINGLE_AREA; *cur_r
 * is left past dms->max_region at the end of the table.
 */
static void _stats_walk_next_present(const struct dm_stats *dms,
				     uint64_t *flags,
				     uint64_t *cur_r, uint64_t *cur_a,
				     uint64_t *cur_g)
{
	struct dm_stats_region *cur = NULL;

	/* start of walk: region loop advances *cur_r to 0. */
	if (*cur_r != DM_STATS_REGION_NOT_PRESENT)
		cur = &dms->regions[*cur_r];

	/* within current region? */
	if (cur && (*flags & DM_STATS_WALK_AREA)) {
		if (++(*cur_a) < _nr_areas_region(cur))
			return;
		else
			*cur_a = 0;
	}

	/* advance to next present, non-skipped region or end */
	/* count can start as UINT64_MAX, probably rework to use post++ */
	/* coverity[overflow_const] overflow is expected here */
	while (++(*cur_r) <= dms->max_region) {
		cur = &dms->regions[*cur_r];
		if (!_stats_region_present(cur))
			continue;
		/* skip single-area regions unless walking areas */
		if ((*flags & DM_STATS_WALK_SKIP_SINGLE_AREA))
			if (!(*flags & DM_STATS_WALK_AREA))
				if (_nr_areas_region(cur) < 2)
					continue;
		/* matching region found */
		break;
	}
	return;
}
|
|
|
|
/*
 * Advance the cursor one step in the current walk phase: to the next
 * area or region for AREA/REGION walks, or to the next present group
 * for GROUP walks. Calling this with no walk flags set is an error.
 */
static void _stats_walk_next(const struct dm_stats *dms, uint64_t *flags,
			     uint64_t *cur_r, uint64_t *cur_a, uint64_t *cur_g)
{
	/* Nothing to walk without a listed region table. */
	if (!dms || !dms->regions)
		return;

	if (*flags & (DM_STATS_WALK_AREA | DM_STATS_WALK_REGION)) {
		if (!(*flags & DM_STATS_WALK_AREA))
			/* region aggregation: area cursor is symbolic */
			*cur_a = DM_STATS_WALK_REGION;
		_stats_walk_next_present(dms, flags, cur_r, cur_a, cur_g);
		return;
	}

	if (*flags & DM_STATS_WALK_GROUP) {
		/* enable group aggregation */
		*cur_r = *cur_a = DM_STATS_WALK_GROUP;
		/* advance to next present group or end */
		do
			(*cur_g)++;
		while (!_stats_group_id_present(dms, *cur_g)
		       && (*cur_g) < dms->max_region + 1);
		return;
	}

	log_error("stats_walk_next called with empty walk flags");
}
|
|
|
|
/*
 * Position the cursor on the first present group for the group phase
 * of a walk. If no groups exist the DM_STATS_WALK_GROUP flag is
 * cleared so that the walk terminates.
 */
static void _group_walk_start(const struct dm_stats *dms, uint64_t *flags,
			      uint64_t *cur_r, uint64_t *cur_a, uint64_t *cur_g)
{
	uint64_t g;

	/* Only meaningful when a group walk was requested. */
	if (!(*flags & DM_STATS_WALK_GROUP))
		return;

	*cur_a = *cur_r = DM_STATS_WALK_GROUP;

	/* Scan for the first region that belongs to a group. */
	for (g = 0; g <= dms->max_region; g++)
		if (_stats_region_is_grouped(dms, g))
			break;

	*cur_g = g;

	if (g > dms->max_region)
		/* no groups to walk */
		*flags &= ~DM_STATS_WALK_GROUP;
}
|
|
|
|
/*
 * Initialise the cursor (cur_r/cur_a/cur_g) for a new walk according
 * to the walk flags and position it on the first object to visit.
 * Group-only walks are delegated to _group_walk_start().
 */
static void _stats_walk_start(const struct dm_stats *dms, uint64_t *flags,
			      uint64_t *cur_r, uint64_t *cur_a,
			      uint64_t *cur_g)
{
	log_debug("starting stats walk with %s %s %s %s",
		  (*flags & DM_STATS_WALK_AREA) ? "AREA" : "",
		  (*flags & DM_STATS_WALK_REGION) ? "REGION" : "",
		  (*flags & DM_STATS_WALK_GROUP) ? "GROUP" : "",
		  (*flags & DM_STATS_WALK_SKIP_SINGLE_AREA) ? "SKIP" : "");

	if (!dms->regions)
		return;

	/* group-only walk: no region/area cursor set-up needed */
	if (!(*flags & (DM_STATS_WALK_AREA | DM_STATS_WALK_REGION))) {
		_group_walk_start(dms, flags, cur_r, cur_a, cur_g);
		return;
	}

	/* initialise cursor state */
	*cur_a = 0;
	*cur_r = DM_STATS_REGION_NOT_PRESENT;
	*cur_g = DM_STATS_GROUP_NOT_PRESENT;

	/* region-aggregate walk: areas are not visited individually */
	if (!(*flags & DM_STATS_WALK_AREA))
		*cur_a = DM_STATS_WALK_REGION;

	/* advance to first present, non-skipped region */
	_stats_walk_next_present(dms, flags, cur_r, cur_a, cur_g);
}
|
|
|
|
#define DM_STATS_WALK_MASK (DM_STATS_WALK_AREA \
|
|
| DM_STATS_WALK_REGION \
|
|
| DM_STATS_WALK_GROUP \
|
|
| DM_STATS_WALK_SKIP_SINGLE_AREA)
|
|
|
|
/*
 * Configure the walk flags used by subsequent dm_stats_walk_start()
 * calls on this handle. Returns 1 on success, 0 if any flag bit is
 * outside DM_STATS_WALK_MASK or dms is NULL.
 */
int dm_stats_walk_init(struct dm_stats *dms, uint64_t flags)
{
	uint64_t bad_bits;

	if (!dms)
		return_0;

	/* Reject any bit outside the documented walk flag set. */
	bad_bits = flags & ~DM_STATS_WALK_MASK;
	if (bad_bits) {
		log_error("Unknown value in walk flags: 0x" FMTx64, bad_bits);
		return 0;
	}

	dms->walk_flags = flags;
	log_debug("dm_stats_walk_init: initialised flags to " FMTx64, flags);

	return 1;
}
|
|
|
|
void dm_stats_walk_start(struct dm_stats *dms)
|
|
{
|
|
if (!dms || !dms->regions)
|
|
return;
|
|
|
|
dms->cur_flags = dms->walk_flags;
|
|
|
|
_stats_walk_start(dms, &dms->cur_flags,
|
|
&dms->cur_region, &dms->cur_area,
|
|
&dms->cur_group);
|
|
}
|
|
|
|
void dm_stats_walk_next(struct dm_stats *dms)
|
|
{
|
|
_stats_walk_next(dms, &dms->cur_flags,
|
|
&dms->cur_region, &dms->cur_area,
|
|
&dms->cur_group);
|
|
}
|
|
|
|
void dm_stats_walk_next_region(struct dm_stats *dms)
|
|
{
|
|
dms->cur_flags &= ~DM_STATS_WALK_AREA;
|
|
_stats_walk_next(dms, &dms->cur_flags,
|
|
&dms->cur_region, &dms->cur_area,
|
|
&dms->cur_group);
|
|
}
|
|
|
|
/*
|
|
* Return 1 if any regions remain that are present and not skipped
|
|
* by the current walk flags or 0 otherwise.
|
|
*/
|
|
static uint64_t _stats_walk_any_unskipped(const struct dm_stats *dms,
|
|
uint64_t *flags,
|
|
uint64_t *cur_r, uint64_t *cur_a)
|
|
{
|
|
struct dm_stats_region *region;
|
|
uint64_t i;
|
|
|
|
if (*cur_r > dms->max_region)
|
|
return 0;
|
|
|
|
for (i = *cur_r; i <= dms->max_region; i++) {
|
|
region = &dms->regions[i];
|
|
if (!_stats_region_present(region))
|
|
continue;
|
|
if ((*flags & DM_STATS_WALK_SKIP_SINGLE_AREA)
|
|
&& !(*flags & DM_STATS_WALK_AREA))
|
|
if (_nr_areas_region(region) < 2)
|
|
continue;
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Handle the end of the area phase of a walk: when no unskipped areas
 * remain, clear DM_STATS_WALK_AREA and start the region phase (if
 * requested), falling through to the group phase when the region
 * phase is absent or immediately exhausted.
 */
static void _stats_walk_end_areas(const struct dm_stats *dms, uint64_t *flags,
				  uint64_t *cur_r, uint64_t *cur_a,
				  uint64_t *cur_g)
{
	int end = !_stats_walk_any_unskipped(dms, flags, cur_r, cur_a);

	if (!(*flags & DM_STATS_WALK_AREA))
		return;

	/* areas still remaining in this walk phase? */
	if (!end)
		return;

	*flags &= ~DM_STATS_WALK_AREA;
	if (*flags & DM_STATS_WALK_REGION) {
		/* start region walk */
		*cur_a = DM_STATS_WALK_REGION;
		*cur_r = DM_STATS_REGION_NOT_PRESENT;
		_stats_walk_next_present(dms, flags, cur_r, cur_a, cur_g);
		if (!_stats_walk_any_unskipped(dms, flags, cur_r, cur_a)) {
			/* no more regions */
			*flags &= ~DM_STATS_WALK_REGION;
			if (!(*flags & DM_STATS_WALK_GROUP))
				*cur_r = dms->max_region;
		}
	}

	/* region phase started successfully? */
	if (*flags & DM_STATS_WALK_REGION)
		return;

	if (*flags & DM_STATS_WALK_GROUP)
		_group_walk_start(dms, flags, cur_r, cur_a, cur_g);
}
|
|
|
|
/*
 * Advance the walk state machine between phases (AREA -> REGION ->
 * GROUP) when the current phase is exhausted. Returns 1 when no walk
 * phases remain (the walk has ended) and 0 otherwise.
 */
static int _stats_walk_end(const struct dm_stats *dms, uint64_t *flags,
			   uint64_t *cur_r, uint64_t *cur_a, uint64_t *cur_g)
{
	if (*flags & DM_STATS_WALK_AREA) {
		_stats_walk_end_areas(dms, flags, cur_r, cur_a, cur_g);
		goto out;
	}

	if (*flags & DM_STATS_WALK_REGION) {
		/* end of region phase: move on to groups (if requested) */
		if (!_stats_walk_any_unskipped(dms, flags, cur_r, cur_a)) {
			*flags &= ~DM_STATS_WALK_REGION;
			_group_walk_start(dms, flags, cur_r, cur_a, cur_g);
		}
		goto out;
	}

	if (*flags & DM_STATS_WALK_GROUP) {
		/* groups remaining? */
		if (*cur_g <= dms->max_region)
			goto out;
		*flags &= ~DM_STATS_WALK_GROUP;
	}
out:
	/* the walk has ended when only the skip flag (if any) remains */
	return !(*flags & ~DM_STATS_WALK_SKIP_SINGLE_AREA);
}
|
|
|
|
int dm_stats_walk_end(struct dm_stats *dms)
|
|
{
|
|
if (!dms)
|
|
return 1;
|
|
|
|
if (_stats_walk_end(dms, &dms->cur_flags,
|
|
&dms->cur_region, &dms->cur_area,
|
|
&dms->cur_group)) {
|
|
dms->cur_flags = dms->walk_flags;
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Decode a (region_id, area_id) pair into the type of statistics
 * object it denotes: a group, an aggregate region, an individual
 * area, or none. DM_STATS_REGION_CURRENT / DM_STATS_AREA_CURRENT
 * are resolved from the handle's embedded cursor.
 */
dm_stats_obj_type_t dm_stats_object_type(const struct dm_stats *dms,
					 uint64_t region_id,
					 uint64_t area_id)
{
	uint64_t group_id;

	region_id = (region_id == DM_STATS_REGION_CURRENT)
		     ? dms->cur_region : region_id ;
	area_id = (area_id == DM_STATS_AREA_CURRENT)
		   ? dms->cur_area : area_id ;

	if (region_id == DM_STATS_REGION_NOT_PRESENT)
		/* no region */
		return DM_STATS_OBJECT_TYPE_NONE;

	if (region_id & DM_STATS_WALK_GROUP) {
		if (region_id == DM_STATS_WALK_GROUP)
			/* indirect group_id from cursor */
			group_id = dms->cur_group;
		else
			/* immediate group_id encoded in region_id */
			group_id = region_id & ~DM_STATS_WALK_GROUP;
		if (!_stats_group_id_present(dms, group_id))
			return DM_STATS_OBJECT_TYPE_NONE;
		return DM_STATS_OBJECT_TYPE_GROUP;
	}

	if (region_id > dms->max_region)
		/* end of table */
		return DM_STATS_OBJECT_TYPE_NONE;

	if (area_id & DM_STATS_WALK_REGION)
		/* aggregate region */
		return DM_STATS_OBJECT_TYPE_REGION;

	/* plain region_id and area_id */
	return DM_STATS_OBJECT_TYPE_AREA;
}
|
|
|
|
dm_stats_obj_type_t dm_stats_current_object_type(const struct dm_stats *dms)
|
|
{
|
|
/* dm_stats_object_type will decode region/area */
|
|
return dm_stats_object_type(dms,
|
|
DM_STATS_REGION_CURRENT,
|
|
DM_STATS_AREA_CURRENT);
|
|
}
|
|
|
|
/*
 * Return the number of areas in region_id. Groups and aggregate
 * regions are indivisible and always report a single area.
 */
uint64_t dm_stats_get_region_nr_areas(const struct dm_stats *dms,
				      uint64_t region_id)
{
	/* groups or aggregate regions cannot be subdivided */
	if (region_id & DM_STATS_WALK_GROUP)
		return 1;

	return _nr_areas_region(&dms->regions[region_id]);
}
|
|
|
|
uint64_t dm_stats_get_current_nr_areas(const struct dm_stats *dms)
|
|
{
|
|
/* groups or aggregate regions cannot be subdivided */
|
|
if (dms->cur_region & DM_STATS_WALK_GROUP)
|
|
return 1;
|
|
|
|
return dm_stats_get_region_nr_areas(dms, dms->cur_region);
|
|
}
|
|
|
|
/*
 * Count the areas of all regions in the handle by performing an
 * area walk with a private cursor (the handle's embedded cursor is
 * left untouched).
 *
 * NOTE(review): the accumulation calls dm_stats_get_current_nr_areas(),
 * which reads the handle's embedded cursor rather than the private one
 * walked here — confirm this is the intended behaviour.
 */
uint64_t dm_stats_get_nr_areas(const struct dm_stats *dms)
{
	uint64_t nr_areas = 0, flags = DM_STATS_WALK_AREA;
	/* use a separate cursor */
	uint64_t cur_region = 0, cur_area = 0, cur_group = 0;

	/* no regions to visit? */
	if (!dms->regions)
		return 0;

	flags = DM_STATS_WALK_AREA;
	_stats_walk_start(dms, &flags, &cur_region, &cur_area, &cur_group);
	do {
		nr_areas += dm_stats_get_current_nr_areas(dms);
		_stats_walk_next(dms, &flags,
				 &cur_region, &cur_area,
				 &cur_group);
	} while (!_stats_walk_end(dms, &flags,
				  &cur_region, &cur_area,
				  &cur_group));
	return nr_areas;
}
|
|
|
|
/* Report whether group_id identifies a group present in this handle. */
int dm_stats_group_present(const struct dm_stats *dms, uint64_t group_id)
{
	return _stats_group_id_present(dms, group_id);
}
|
|
|
|
int dm_stats_get_region_nr_histogram_bins(const struct dm_stats *dms,
|
|
uint64_t region_id)
|
|
{
|
|
region_id = (region_id == DM_STATS_REGION_CURRENT)
|
|
? dms->cur_region : region_id ;
|
|
|
|
/* FIXME: support group histograms if all region bounds match */
|
|
if (region_id & DM_STATS_WALK_GROUP)
|
|
return 0;
|
|
|
|
if (!dms->regions[region_id].bounds)
|
|
return 0;
|
|
|
|
return dms->regions[region_id].bounds->nr_bins;
|
|
}
|
|
|
|
/*
|
|
* Fill buf with a list of set regions in the regions bitmap. Consecutive
|
|
* ranges of set region IDs are output using "M-N" range notation.
|
|
*
|
|
* The number of bytes consumed is returned or zero on error.
|
|
*/
|
|
static size_t _stats_group_tag_fill(const struct dm_stats *dms,
				    dm_bitset_t regions,
				    char *buf, size_t buflen)
{
	int i, j, r, next, last = 0;
	size_t used = 0;

	last = dm_bit_get_last(regions);

	i = dm_bit_get_first(regions);
	for(; i >= 0; i = dm_bit_get_next(regions, i)) {
		/* find range end: walk consecutive set bits; the loop
		 * exits with 'next' one past the last consecutive bit */
		j = i;
		do
			next = j + 1;
		while ((j = dm_bit_get_next(regions, j)) == next);

		/* set to last set bit */
		j = next - 1;

		/* handle range vs. single region */
		if (i != j)
			r = dm_snprintf(buf, buflen, FMTu64 "-" FMTu64 "%s",
					(uint64_t) i, (uint64_t) j,
					(j == last) ? "" : ",");
		else
			r = dm_snprintf(buf, buflen, FMTu64 "%s", (uint64_t) i,
					(i == last) ? "" : ",");
		if (r < 0)
			goto_bad;

		i = next; /* skip handled bits if in range */

		/* advance past the bytes just written */
		buf += r;
		used += r;
	}

	return used;
bad:
	log_error("Could not format group list.");
	return 0;
}
|
|
|
|
/*
|
|
* Calculate the space required to hold a string description of the group
|
|
* described by the regions bitset using comma separated list in range
|
|
* notation ("A,B,C,M-N").
|
|
*/
|
|
static size_t _stats_group_tag_len(const struct dm_stats *dms,
				   dm_bitset_t regions)
{
	int64_t i, j, next, nr_regions = 0;
	size_t buflen = 0, id_len = 0;

	/* check region ids and find last set bit */
	i = dm_bit_get_first(regions);
	for (; i >= 0; i = dm_bit_get_next(regions, i)) {
		/* length of region_id or range start in characters */
		id_len = (i) ? 1 + (size_t) log10(i) : 1;
		buflen += id_len;
		/* find range end: walk consecutive set bits; 'next' ends
		 * one past the last consecutive bit */
		j = i;
		do
			next = j + 1;
		while ((j = dm_bit_get_next(regions, j)) == next);

		/* set to last set bit */
		j = next - 1;

		nr_regions += j - i + 1;

		/* handle range */
		if (i != j) {
			/* j is always > i, which is always >= 0 */
			id_len = 1 + (size_t) log10(j);
			buflen += id_len + 1; /* range end plus "-" */
		}
		buflen++; /* separator (",") or terminating NULL */
		i = next; /* skip bits if handling range */
	}
	return buflen;
}
|
|
|
|
/*
|
|
* Build a DMS_GROUP="..." tag for the group specified by group_id,
|
|
* to be stored in the corresponding region's aux_data field.
|
|
*/
|
|
static char *_build_group_tag(struct dm_stats *dms, uint64_t group_id)
{
	char *aux_string, *buf;
	dm_bitset_t regions;
	const char *alias;
	size_t buflen = 0;
	int r;

	regions = dms->groups[group_id].regions;
	alias = dms->groups[group_id].alias;

	/* space for the member region list ("A,B,M-N") */
	buflen = _stats_group_tag_len(dms, regions);

	if (!buflen)
		return_0;

	buflen += DMS_GROUP_TAG_LEN;
	buflen += 1 + (alias ? strlen(alias) + 2 : 0); /* 'alias:' */

	/* caller frees; 'buf' tracks the current write position */
	buf = aux_string = dm_malloc(buflen);
	if (!buf) {
		log_error("Could not allocate memory for aux_data string.");
		return NULL;
	}

	/* "DMS_GROUP=" prefix */
	if (!_dm_strncpy(buf, DMS_GROUP_TAG, DMS_GROUP_TAG_LEN + 1))
		goto_bad;

	buf += DMS_GROUP_TAG_LEN;
	buflen -= DMS_GROUP_TAG_LEN;

	/* optional quoted alias followed by the group separator */
	if (alias)
		r = dm_snprintf(buf, buflen, "\"%s\"%c", alias, DMS_GROUP_SEP);
	else
		r = dm_snprintf(buf, buflen, "%c", DMS_GROUP_SEP);

	if (r < 0)
		goto_bad;

	buf += r;
	buflen -= r;

	/* member region list */
	r = _stats_group_tag_fill(dms, regions, buf, buflen);
	if (!r)
		goto_bad;

	return aux_string;
bad:
	log_error("Could not format group aux_data.");
	dm_free(aux_string);
	return NULL;
}
|
|
|
|
/*
|
|
* Store updated aux_data for a region. The aux_data is passed to the
|
|
* kernel using the @stats_set_aux message. Any required group tag is
|
|
* generated from the current group table and included in the message.
|
|
*/
|
|
static int _stats_set_aux(struct dm_stats *dms,
			  uint64_t region_id, const char *user_data)
{
	char *group_tag = NULL, *group_tag_escaped = NULL;
	struct dm_task *dmt = NULL;
	char msg[STATS_MSG_BUF_LEN];
	int r = 0;

	/* group data required? (region_id leads a group) */
	if (_stats_group_id_present(dms, region_id)) {
		group_tag = _build_group_tag(dms, region_id);
		if (!group_tag) {
			log_error("Could not build group descriptor for "
				  "region ID " FMTu64, region_id);
			goto bad;
		}
		/* escape whitespace so the tag survives message parsing */
		group_tag_escaped = _stats_escape_aux_data(group_tag);
		if (!group_tag_escaped)
			goto bad;
	}

	/* empty user_data is sent as "-" */
	if (dm_snprintf(msg, sizeof(msg), "@stats_set_aux " FMTu64 " %s%s%s ",
			region_id, (group_tag_escaped) ? group_tag_escaped : "",
			(group_tag_escaped) ? DMS_AUX_SEP : "",
			(strlen(user_data)) ? user_data : "-") < 0) {
		log_error("Could not prepare @stats_set_aux message");
		goto bad;
	}

	if (!(dmt = _stats_send_message(dms, msg)))
		goto_bad;

	/* no response to a @stats_set_aux message */
	dm_task_destroy(dmt);

	r = 1;
bad:
	dm_free(group_tag_escaped);
	dm_free(group_tag);

	return r;
}
|
|
|
|
/*
|
|
* Maximum length of a "start+end" range string:
|
|
* Two 20 digit uint64_t, '+', and NULL.
|
|
*/
|
|
#define RANGE_LEN 42
|
|
/*
 * Send an @stats_create message for the given range, step, and
 * optional arguments (precise timestamps, histogram bounds), and
 * parse the new region's ID from the response into *region_id.
 *
 * Returns 1 on success and 0 on failure.
 */
static int _stats_create_region(struct dm_stats *dms, uint64_t *region_id,
				uint64_t start, uint64_t len, int64_t step,
				int precise, const char *hist_arg,
				const char *program_id, const char *aux_data)
{
	char msg[STATS_MSG_BUF_LEN], range[RANGE_LEN], *endptr = NULL;
	const char *err = NULL;
	const char *precise_str = PRECISE_ARG;
	const char *resp, *opt_args = NULL;
	char *aux_data_escaped = NULL;
	struct dm_task *dmt = NULL;
	int r = 0, nr_opt = 0;

	if (!_stats_bound(dms))
		return_0;

	if (!program_id || !strlen(program_id))
		program_id = dms->program_id;

	if (start || len) {
		if (dm_snprintf(range, sizeof(range), FMTu64 "+" FMTu64,
				start, len) < 0) {
			err = "range";
			goto_bad;
		}
	}

	/* precise < 0 means "use the handle's default" */
	if (precise < 0)
		precise = dms->precise;

	if (precise)
		nr_opt++;
	else
		precise_str = "";

	if (hist_arg)
		nr_opt++;
	else
		hist_arg = "";

	/* escape whitespace so aux_data survives message tokenisation */
	aux_data_escaped = _stats_escape_aux_data(aux_data);
	if (!aux_data_escaped)
		return_0;

	if (nr_opt) {
		if ((dm_asprintf((char **)&opt_args, "%d %s %s%s", nr_opt,
				 precise_str,
				 (strlen(hist_arg)) ? HISTOGRAM_ARG : "",
				 hist_arg)) < 0) {
			err = PRECISE_ARG " option.";
			goto_bad;
		}
	} else
		opt_args = dm_strdup("");

	/*
	 * Fix: send the escaped aux_data. The escaped copy was built
	 * above but the raw string was previously placed in the message,
	 * which broke parsing for aux_data containing whitespace.
	 */
	if (dm_snprintf(msg, sizeof(msg), "@stats_create %s %s" FMTu64
			" %s %s %s", (start || len) ? range : "-",
			(step < 0) ? "/" : "",
			(uint64_t)llabs(step),
			opt_args, program_id, aux_data_escaped) < 0) {
		err = "message";
		goto_bad;
	}

	if (!(dmt = _stats_send_message(dms, msg)))
		goto_out;

	resp = dm_task_get_message_response(dmt);
	if (!resp) {
		log_error("Could not parse empty @stats_create response.");
		goto out;
	}

	/* the response is the new region's decimal ID */
	if (region_id) {
		errno = 0;
		*region_id = strtoull(resp, &endptr, 10);
		if (errno || resp == endptr)
			goto_out;
	}

	r = 1;
	goto out;

bad:
	log_error("Could not prepare @stats_create %s.", err);
out:
	if (dmt)
		dm_task_destroy(dmt);
	dm_free((void *) opt_args);
	dm_free(aux_data_escaped);

	return r;
}
|
|
|
|
/*
 * Create a new stats region (versioned public entry point): builds the
 * histogram argument from bounds (if any) and delegates to
 * _stats_create_region(). Returns 1 on success and 0 on failure.
 */
DM_EXPORT_NEW_SYMBOL(int, dm_stats_create_region, 1_02_107)
	(struct dm_stats *dms, uint64_t *region_id,
	 uint64_t start, uint64_t len, int64_t step,
	 int precise, struct dm_histogram *bounds,
	 const char *program_id, const char *user_data)
{
	char *hist_arg = NULL;
	int r = 0;

	/* Nanosecond counters and histograms both need precise_timestamps. */
	if ((precise || bounds) && !_stats_check_precise_timestamps(dms))
		return_0;

	if (bounds) {
		/* _build_histogram_arg enables precise if vals < 1ms. */
		if (!(hist_arg = _build_histogram_arg(bounds, &precise)))
			goto_out;
	}

	r = _stats_create_region(dms, region_id, start, len, step,
				 precise, hist_arg, program_id, user_data);
	dm_free(hist_arg);

out:
	return r;
}
|
|
|
|
|
|
/*
 * Detach every member region of group_id from the group by resetting
 * each member's group_id to DM_STATS_GROUP_NOT_PRESENT.
 */
static void _stats_clear_group_regions(struct dm_stats *dms, uint64_t group_id)
{
	dm_bitset_t members = dms->groups[group_id].regions;
	uint64_t r;

	for (r = dm_bit_get_first(members);
	     r != DM_STATS_GROUP_NOT_PRESENT;
	     r = dm_bit_get_next(members, r))
		dms->regions[r].group_id = DM_STATS_GROUP_NOT_PRESENT;
}
|
|
|
|
/*
 * Remove region_id from the group it belongs to and push the group
 * leader's updated aux_data to the kernel. If region_id is itself the
 * group leader, the whole group is dissolved first.
 *
 * Returns 1 on success and 0 on failure.
 */
static int _stats_remove_region_id_from_group(struct dm_stats *dms,
					      uint64_t region_id)
{
	struct dm_stats_region *region = &dms->regions[region_id];
	uint64_t group_id = region->group_id;
	dm_bitset_t regions = dms->groups[group_id].regions;

	if (!_stats_region_is_grouped(dms, region_id))
		return_0;

	dm_bit_clear(regions, region_id);

	/* removing group leader? */
	if (region_id == group_id) {
		_stats_clear_group_regions(dms, group_id);
		_stats_group_destroy(&dms->groups[group_id]);
	}

	/* re-write the leader's aux_data without the removed member */
	return _stats_set_aux(dms, group_id, dms->regions[group_id].aux_data);
}
|
|
|
|
/*
 * Delete region_id from the bound device via @stats_delete, first
 * detaching it from any group it belongs to.
 *
 * Returns 1 on success and 0 on failure.
 */
static int _stats_delete_region(struct dm_stats *dms, uint64_t region_id)
{
	char msg[STATS_MSG_BUF_LEN];
	struct dm_task *dmt;

	if (_stats_region_is_grouped(dms, region_id))
		if (!_stats_remove_region_id_from_group(dms, region_id)) {
			log_error("Could not remove region ID " FMTu64 " from "
				  "group ID " FMTu64,
				  region_id, dms->regions[region_id].group_id);
			return 0;
		}

	if (dm_snprintf(msg, sizeof(msg), "@stats_delete " FMTu64, region_id) < 0) {
		log_error("Could not prepare @stats_delete message.");
		return 0;
	}

	dmt = _stats_send_message(dms, msg);
	if (!dmt)
		return_0;
	dm_task_destroy(dmt);

	return 1;
}
|
|
|
|
/*
 * Public region deletion entry point: validates region_id against a
 * listed region table (listing the handle on demand if necessary) and
 * deletes the region, preserving the handle's prior listed/unlisted
 * state for API compatibility. Returns 1 on success, 0 on failure.
 */
int dm_stats_delete_region(struct dm_stats *dms, uint64_t region_id)
{
	int listed = 0;

	if (!_stats_bound(dms))
		return_0;

	/*
	 * To correctly delete a region, that may be part of a group, a
	 * listed handle is required, since the region may need to be
	 * removed from another region's group descriptor; earlier
	 * versions of the region deletion interface do not have this
	 * requirement since there are no dependencies between regions.
	 *
	 * Listing a previously unlisted handle has numerous
	 * side-effects on other calls and operations (e.g. stats
	 * walks), especially when returning to a function that depends
	 * on the state of the region table, or statistics cursor.
	 *
	 * To avoid changing the semantics of the API, and the need for
	 * a versioned symbol, maintain a flag indicating when a listing
	 * has been carried out, and drop the region table before
	 * returning.
	 *
	 * This ensures compatibility with programs compiled against
	 * earlier versions of libdm.
	 */
	if (!dms->regions && !(listed = dm_stats_list(dms, dms->program_id))) {
		log_error("Could not obtain region list while deleting "
			  "region ID " FMTu64, region_id);
		goto bad;
	}

	if (!dm_stats_get_nr_regions(dms)) {
		log_error("Could not delete region ID " FMTu64 ": "
			  "no regions found", region_id);
		goto bad;
	}

	/* includes invalid and special region_id values */
	if (!dm_stats_region_present(dms, region_id)) {
		log_error("Region ID " FMTu64 " does not exist", region_id);
		goto bad;
	}

	if (!_stats_delete_region(dms, region_id))
		goto bad;

	if (!listed)
		/* wipe region and mark as not present */
		_stats_region_destroy(&dms->regions[region_id]);
	else
		/* return handle to prior state */
		_stats_regions_destroy(dms);

	return 1;
bad:
	/* drop any region table this call created */
	if (listed)
		_stats_regions_destroy(dms);

	return 0;
}
|
|
|
|
/*
 * Zero the counters of region_id on the bound device by sending a
 * @stats_clear message. Returns 1 on success and 0 on failure.
 */
int dm_stats_clear_region(struct dm_stats *dms, uint64_t region_id)
{
	struct dm_task *dmt;
	char msg[STATS_MSG_BUF_LEN];

	if (!_stats_bound(dms))
		return_0;

	if (dm_snprintf(msg, sizeof(msg), "@stats_clear " FMTu64,
			region_id) < 0) {
		log_error("Could not prepare @stats_clear message.");
		return 0;
	}

	if (!(dmt = _stats_send_message(dms, msg)))
		return_0;

	dm_task_destroy(dmt);

	return 1;
}
|
|
|
|
/*
 * Send an @stats_print (or @stats_print_clear, when clear is set)
 * message for region_id, optionally restricted to num_lines rows
 * starting at start_line. Returns the completed task (caller must
 * destroy it) or NULL on failure.
 */
static struct dm_task *_stats_print_region(struct dm_stats *dms,
				    uint64_t region_id, unsigned start_line,
				    unsigned num_lines, unsigned clear)
{
	/* @stats_print[_clear] <region_id> [<start_line> <num_lines>] */
	char msg[STATS_MSG_BUF_LEN], lines[RANGE_LEN];
	struct dm_task *dmt = NULL;
	const char *err = NULL;

	/* optional row window */
	if (start_line || num_lines)
		if (dm_snprintf(lines, sizeof(lines),
				"%u %u", start_line, num_lines) < 0) {
			err = "row specification";
			goto_bad;
		}

	if (dm_snprintf(msg, sizeof(msg), "@stats_print%s " FMTu64 " %s",
			(clear) ? "_clear" : "",
			region_id, (start_line || num_lines) ? lines : "") < 0) {
		err = "message";
		goto_bad;
	}

	if (!(dmt = _stats_send_message(dms, msg)))
		return_NULL;

	return dmt;
bad:
	log_error("Could not prepare @stats_print %s.", err);

	return NULL;
}
|
|
|
|
/*
 * Return the raw @stats_print[_clear] response for region_id as a
 * string allocated from the handle's pool (release it with
 * dm_stats_buffer_destroy()), or NULL on failure. Not supported for
 * groups or aggregate regions.
 */
char *dm_stats_print_region(struct dm_stats *dms, uint64_t region_id,
			    unsigned start_line, unsigned num_lines,
			    unsigned clear)
{
	char *resp = NULL;
	struct dm_task *dmt = NULL;
	const char *response;

	if (!_stats_bound(dms))
		return_0;

	/*
	 * FIXME: 'print' can be emulated for groups or aggregate regions
	 * by populating the handle and emitting aggregate counter data
	 * in the kernel print format.
	 */
	if (region_id == DM_STATS_WALK_GROUP)
		return_0;

	dmt = _stats_print_region(dms, region_id,
				  start_line, num_lines, clear);

	if (!dmt)
		return_0;

	if (!(response = dm_task_get_message_response(dmt)))
		goto_out;

	/* copy out of the task before it is destroyed */
	if (!(resp = dm_pool_strdup(dms->mem, response)))
		log_error("Could not allocate memory for response buffer.");
out:
	dm_task_destroy(dmt);

	return resp;
}
|
|
|
|
/* Release a buffer returned by dm_stats_print_region() to the pool. */
void dm_stats_buffer_destroy(struct dm_stats *dms, char *buffer)
{
	dm_pool_free(dms->mem, buffer);
}
|
|
|
|
uint64_t dm_stats_get_nr_regions(const struct dm_stats *dms)
|
|
{
|
|
if (!dms)
|
|
return_0;
|
|
|
|
if (!dms->regions)
|
|
return 0;
|
|
|
|
return dms->nr_regions;
|
|
}
|
|
|
|
uint64_t dm_stats_get_nr_groups(const struct dm_stats *dms)
|
|
{
|
|
uint64_t group_id, nr_groups = 0;
|
|
|
|
if (!dms)
|
|
return_0;
|
|
|
|
/* no regions or groups? */
|
|
if (!dms->regions || !dms->groups)
|
|
return 0;
|
|
|
|
for (group_id = 0; group_id <= dms->max_region; group_id++)
|
|
if (dms->groups[group_id].group_id
|
|
!= DM_STATS_GROUP_NOT_PRESENT)
|
|
nr_groups++;
|
|
|
|
return nr_groups;
|
|
}
|
|
|
|
/**
|
|
* Test whether region_id is present in this set of stats data.
|
|
*/
|
|
int dm_stats_region_present(const struct dm_stats *dms, uint64_t region_id)
{
	/* the handle must have been listed first */
	if (!dms->regions)
		return_0;

	/* covers invalid and special (walk flag) region_id values */
	if (region_id > dms->max_region)
		return 0;

	return _stats_region_present(&dms->regions[region_id]);
}
|
|
|
|
/*
 * Parse a single region's @stats_print response into the handle's
 * region table entry for region_id.
 *
 * Returns 1 on success and 0 on failure.
 */
static int _dm_stats_populate_region(struct dm_stats *dms, uint64_t region_id,
				     const char *resp)
{
	struct dm_stats_region *region;

	if (!_stats_bound(dms))
		return_0;

	/*
	 * Fix: check the region table before indexing into it. The old
	 * code computed &dms->regions[region_id] first and then tested
	 * the resulting pointer, which can never be NULL (and indexing
	 * a NULL table is undefined behaviour).
	 */
	if (!dms->regions) {
		log_error("Cannot populate empty handle before dm_stats_list().");
		return 0;
	}

	region = &dms->regions[region_id];

	if (!_stats_parse_region(dms, resp, region, region->timescale)) {
		log_error("Could not parse @stats_print message response.");
		return 0;
	}
	region->region_id = region_id;
	return 1;
}
|
|
|
|
/*
 * Populate the handle with counter data for one region, or for all
 * regions when region_id is DM_STATS_REGIONS_ALL (which also
 * re-lists the handle). Counters are cleared in the kernel as they
 * are read. Returns 1 on success and 0 on failure.
 */
int dm_stats_populate(struct dm_stats *dms, const char *program_id,
		      uint64_t region_id)
{
	int all_regions = (region_id == DM_STATS_REGIONS_ALL);
	struct dm_task *dmt = NULL; /* @stats_print task */
	uint64_t saved_flags; /* saved walk flags */
	const char *resp;

	/*
	 * We are about to destroy and re-create the region table, so it
	 * is safe to use the cursor embedded in the stats handle: just
	 * save a copy of the current walk_flags to restore later.
	 */
	saved_flags = dms->walk_flags;

	if (!_stats_bound(dms))
		return_0;

	/* group aggregates cannot be populated directly */
	if ((!all_regions) && (region_id & DM_STATS_WALK_GROUP)) {
		log_error("Invalid region_id for dm_stats_populate: "
			  "DM_STATS_WALK_GROUP");
		return 0;
	}

	/* allow zero-length program_id for populate */
	if (!program_id)
		program_id = dms->program_id;

	if (all_regions && !dm_stats_list(dms, program_id)) {
		log_error("Could not parse @stats_list response.");
		goto bad;
	} else if (!_stats_set_name_cache(dms)) {
		goto_bad;
	}

	if (!dms->nr_regions) {
		log_verbose("No stats regions registered: %s", dms->name);
		return 0;
	}

	/* walk regions only; areas are parsed from each print response */
	dms->walk_flags = DM_STATS_WALK_REGION;
	dm_stats_walk_start(dms);
	do {
		region_id = (all_regions)
			     ? dm_stats_get_current_region(dms) : region_id;

		/* obtain all lines and clear counter values */
		if (!(dmt = _stats_print_region(dms, region_id, 0, 0, 1)))
			goto_bad;

		resp = dm_task_get_message_response(dmt);
		if (!_dm_stats_populate_region(dms, region_id, resp)) {
			dm_task_destroy(dmt);
			goto_bad;
		}

		dm_task_destroy(dmt);
		dm_stats_walk_next(dms);

	} while (all_regions && !dm_stats_walk_end(dms));

	dms->walk_flags = saved_flags;
	return 1;

bad:
	/* leave the handle unlisted on failure */
	dms->walk_flags = saved_flags;
	_stats_regions_destroy(dms);
	dms->regions = NULL;
	return 0;
}
|
|
|
|
/**
 * Destroy a dm_stats object and all associated regions and counter sets.
 *
 * Safe to call with a NULL handle. Tears down the region and group
 * tables and the device binding, then releases the three memory pools
 * and the duplicated strings before freeing the handle itself.
 */
void dm_stats_destroy(struct dm_stats *dms)
{
	if (!dms)
		return;

	_stats_regions_destroy(dms);
	_stats_groups_destroy(dms);
	_stats_clear_binding(dms);
	dm_pool_destroy(dms->mem);
	dm_pool_destroy(dms->hist_mem);
	dm_pool_destroy(dms->group_mem);
	dm_free(dms->program_id);
	/* name is stored const in the handle; cast away for dm_free(). */
	dm_free((char *) dms->name);
	dm_free(dms);
}
|
|
|
|
/*
 * Walk each area that is a member of region_id rid.
 * i is a variable holding the current area_id (callers use uint64_t).
 */
#define _foreach_region_area(dms, rid, i) \
	for ((i) = 0; (i) < _nr_areas_region(&dms->regions[(rid)]); (i)++) \

/*
 * Walk each region that is a member of group_id gid.
 * i is a variable holding the current region_id; iteration stops when
 * the group's region bitmap yields DM_STATS_GROUP_NOT_PRESENT.
 */
#define _foreach_group_region(dms, gid, i) \
	for ((i) = dm_bit_get_first((dms)->groups[(gid)].regions); \
	     (i) != DM_STATS_GROUP_NOT_PRESENT; \
	     (i) = dm_bit_get_next((dms)->groups[(gid)].regions, (i))) \

/*
 * Walk each region that is a member of group_id gid visiting each
 * area within the region.
 * i is a variable holding the current region_id.
 * j is a variable holding the current area_id.
 */
#define _foreach_group_area(dms, gid, i, j) \
	_foreach_group_region(dms, gid, i) \
		_foreach_region_area(dms, i, j)
|
static uint64_t _stats_get_counter(const struct dm_stats *dms,
|
|
const struct dm_stats_counters *area,
|
|
dm_stats_counter_t counter)
|
|
{
|
|
switch(counter) {
|
|
case DM_STATS_READS_COUNT:
|
|
return area->reads;
|
|
case DM_STATS_READS_MERGED_COUNT:
|
|
return area->reads_merged;
|
|
case DM_STATS_READ_SECTORS_COUNT:
|
|
return area->read_sectors;
|
|
case DM_STATS_READ_NSECS:
|
|
return area->read_nsecs;
|
|
case DM_STATS_WRITES_COUNT:
|
|
return area->writes;
|
|
case DM_STATS_WRITES_MERGED_COUNT:
|
|
return area->writes_merged;
|
|
case DM_STATS_WRITE_SECTORS_COUNT:
|
|
return area->write_sectors;
|
|
case DM_STATS_WRITE_NSECS:
|
|
return area->write_nsecs;
|
|
case DM_STATS_IO_IN_PROGRESS_COUNT:
|
|
return area->io_in_progress;
|
|
case DM_STATS_IO_NSECS:
|
|
return area->io_nsecs;
|
|
case DM_STATS_WEIGHTED_IO_NSECS:
|
|
return area->weighted_io_nsecs;
|
|
case DM_STATS_TOTAL_READ_NSECS:
|
|
return area->total_read_nsecs;
|
|
case DM_STATS_TOTAL_WRITE_NSECS:
|
|
return area->total_write_nsecs;
|
|
case DM_STATS_NR_COUNTERS:
|
|
default:
|
|
log_error("Attempt to read invalid counter: %d", counter);
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
uint64_t dm_stats_get_counter(const struct dm_stats *dms,
|
|
dm_stats_counter_t counter,
|
|
uint64_t region_id, uint64_t area_id)
|
|
{
|
|
uint64_t i, j, sum = 0; /* aggregation */
|
|
int sum_regions = 0;
|
|
struct dm_stats_region *region;
|
|
struct dm_stats_counters *area;
|
|
|
|
region_id = (region_id == DM_STATS_REGION_CURRENT)
|
|
? dms->cur_region : region_id ;
|
|
area_id = (area_id == DM_STATS_REGION_CURRENT)
|
|
? dms->cur_area : area_id ;
|
|
|
|
sum_regions = !!(region_id & DM_STATS_WALK_GROUP);
|
|
|
|
if (region_id == DM_STATS_WALK_GROUP)
|
|
/* group walk using the cursor */
|
|
region_id = dms->cur_group;
|
|
else if (region_id & DM_STATS_WALK_GROUP)
|
|
/* group walk using immediate group_id */
|
|
region_id &= ~DM_STATS_WALK_GROUP;
|
|
region = &dms->regions[region_id];
|
|
|
|
/*
|
|
* All statistics aggregation takes place here: aggregate metrics
|
|
* are calculated as normal using the aggregated counter values
|
|
* returned for the region or group specified.
|
|
*/
|
|
|
|
if (_stats_region_is_grouped(dms, region_id) && (sum_regions)) {
|
|
/* group */
|
|
if (area_id & DM_STATS_WALK_GROUP)
|
|
_foreach_group_area(dms, region->group_id, i, j) {
|
|
area = &dms->regions[i].counters[j];
|
|
sum += _stats_get_counter(dms, area, counter);
|
|
}
|
|
else
|
|
_foreach_group_region(dms, region->group_id, i) {
|
|
area = &dms->regions[i].counters[area_id];
|
|
sum += _stats_get_counter(dms, area, counter);
|
|
}
|
|
} else if (area_id == DM_STATS_WALK_REGION) {
|
|
/* aggregate region */
|
|
_foreach_region_area(dms, region_id, j) {
|
|
area = &dms->regions[region_id].counters[j];
|
|
sum += _stats_get_counter(dms, area, counter);
|
|
}
|
|
} else {
|
|
/* plain region / area */
|
|
area = ®ion->counters[area_id];
|
|
sum = _stats_get_counter(dms, area, counter);
|
|
}
|
|
|
|
return sum;
|
|
}
|
|
|
|
/*
 * Methods for accessing named counter fields. All methods share the
 * following naming scheme and prototype:
 *
 * uint64_t dm_stats_get_COUNTER(const struct dm_stats *, uint64_t, uint64_t)
 *
 * Where the two integer arguments are the region_id and area_id
 * respectively.
 *
 * name is the name of the counter (lower case)
 * counter is the part of the enum name following DM_STATS_ (upper case)
 *
 * Each generated function is a thin wrapper over dm_stats_get_counter()
 * and so inherits its cursor and group/region aggregation behaviour.
 */
#define MK_STATS_GET_COUNTER_FN(name, counter) \
uint64_t dm_stats_get_ ## name(const struct dm_stats *dms, \
			       uint64_t region_id, uint64_t area_id) \
{ \
	return dm_stats_get_counter(dms, DM_STATS_ ## counter, \
				    region_id, area_id); \
}

MK_STATS_GET_COUNTER_FN(reads, READS_COUNT)
MK_STATS_GET_COUNTER_FN(reads_merged, READS_MERGED_COUNT)
MK_STATS_GET_COUNTER_FN(read_sectors, READ_SECTORS_COUNT)
MK_STATS_GET_COUNTER_FN(read_nsecs, READ_NSECS)
MK_STATS_GET_COUNTER_FN(writes, WRITES_COUNT)
MK_STATS_GET_COUNTER_FN(writes_merged, WRITES_MERGED_COUNT)
MK_STATS_GET_COUNTER_FN(write_sectors, WRITE_SECTORS_COUNT)
MK_STATS_GET_COUNTER_FN(write_nsecs, WRITE_NSECS)
MK_STATS_GET_COUNTER_FN(io_in_progress, IO_IN_PROGRESS_COUNT)
MK_STATS_GET_COUNTER_FN(io_nsecs, IO_NSECS)
MK_STATS_GET_COUNTER_FN(weighted_io_nsecs, WEIGHTED_IO_NSECS)
MK_STATS_GET_COUNTER_FN(total_read_nsecs, TOTAL_READ_NSECS)
MK_STATS_GET_COUNTER_FN(total_write_nsecs, TOTAL_WRITE_NSECS)
#undef MK_STATS_GET_COUNTER_FN
|
|
|
|
/*
|
|
* Floating point stats metric functions
|
|
*
|
|
* Called from dm_stats_get_metric() to calculate the value of
|
|
* the requested metric.
|
|
*
|
|
 * int _metric_name(const struct dm_stats *dms, double *value,
 *                  uint64_t region_id, uint64_t area_id);
|
|
*
|
|
* Calculate a metric value from the counter data for the given
|
|
* identifiers and store it in the memory pointed to by value,
|
|
* applying group or region aggregation if enabled.
|
|
*
|
|
* Return one on success or zero on failure.
|
|
*
|
|
* To add a new metric:
|
|
*
|
|
* o Add a new name to the dm_stats_metric_t enum.
|
|
* o Create a _metric_fn() to calculate the new metric.
|
|
* o Add _metric_fn to the _metrics function table
|
|
* (entries in enum order).
|
|
* o Do not add a new named public function for the metric -
|
|
* users of new metrics are encouraged to convert to the enum
|
|
* based metric interface.
|
|
*
|
|
*/
|
|
|
|
/* Rate of read merges over the sample interval. */
static int _rd_merges_per_sec(const struct dm_stats *dms, double *rrqm,
			      uint64_t region_id, uint64_t area_id)
{
	uint64_t merges = dm_stats_get_counter(dms, DM_STATS_READS_MERGED_COUNT,
					       region_id, area_id);

	/* NOTE(review): no NSEC_PER_SEC factor here, unlike
	 * _reads_per_sec(); result is per-nanosecond — confirm intended. */
	*rrqm = (double) merges / (double) dms->interval_ns;

	return 1;
}
|
|
|
|
/* Rate of write merges over the sample interval. */
static int _wr_merges_per_sec(const struct dm_stats *dms, double *wrqm,
			      uint64_t region_id, uint64_t area_id)
{
	uint64_t merges = dm_stats_get_counter(dms, DM_STATS_WRITES_MERGED_COUNT,
					       region_id, area_id);

	/* NOTE(review): no NSEC_PER_SEC factor here, unlike
	 * _writes_per_sec(); result is per-nanosecond — confirm intended. */
	*wrqm = (double) merges / (double) dms->interval_ns;

	return 1;
}
|
|
|
|
/* Read operations completed per second over the sample interval. */
static int _reads_per_sec(const struct dm_stats *dms, double *rd_s,
			  uint64_t region_id, uint64_t area_id)
{
	uint64_t nr_reads = dm_stats_get_counter(dms, DM_STATS_READS_COUNT,
						 region_id, area_id);

	*rd_s = ((double) nr_reads * NSEC_PER_SEC) / (double) dms->interval_ns;

	return 1;
}
|
|
|
|
/* Write operations completed per second over the sample interval. */
static int _writes_per_sec(const struct dm_stats *dms, double *wr_s,
			   uint64_t region_id, uint64_t area_id)
{
	uint64_t nr_writes = dm_stats_get_counter(dms, DM_STATS_WRITES_COUNT,
						  region_id, area_id);

	*wr_s = ((double) nr_writes * NSEC_PER_SEC) / (double) dms->interval_ns;

	return 1;
}
|
|
|
|
/* Sectors read per second over the sample interval. */
static int _read_sectors_per_sec(const struct dm_stats *dms, double *rsec_s,
				 uint64_t region_id, uint64_t area_id)
{
	uint64_t sectors = dm_stats_get_counter(dms, DM_STATS_READ_SECTORS_COUNT,
						region_id, area_id);

	*rsec_s = ((double) sectors * (double) NSEC_PER_SEC)
		   / (double) dms->interval_ns;

	return 1;
}
|
|
|
|
/* Sectors written per second over the sample interval. */
static int _write_sectors_per_sec(const struct dm_stats *dms, double *wsec_s,
				  uint64_t region_id, uint64_t area_id)
{
	uint64_t sectors = dm_stats_get_counter(dms, DM_STATS_WRITE_SECTORS_COUNT,
						region_id, area_id);

	*wsec_s = ((double) sectors * (double) NSEC_PER_SEC)
		   / (double) dms->interval_ns;

	return 1;
}
|
|
|
|
/* Mean request size in sectors (reads + writes), or zero with no I/O. */
static int _average_request_size(const struct dm_stats *dms, double *arqsz,
				 uint64_t region_id, uint64_t area_id)
{
	uint64_t nr_ios, nr_sectors;

	nr_ios = dm_stats_get_counter(dms, DM_STATS_READS_COUNT,
				      region_id, area_id)
		 + dm_stats_get_counter(dms, DM_STATS_WRITES_COUNT,
					region_id, area_id);
	nr_sectors = dm_stats_get_counter(dms, DM_STATS_READ_SECTORS_COUNT,
					  region_id, area_id)
		     + dm_stats_get_counter(dms, DM_STATS_WRITE_SECTORS_COUNT,
					    region_id, area_id);

	*arqsz = (nr_ios) ? (double) nr_sectors / (double) nr_ios : 0.0;

	return 1;
}
|
|
|
|
/* Mean queue depth: weighted I/O time over the sample interval. */
static int _average_queue_size(const struct dm_stats *dms, double *qusz,
			       uint64_t region_id, uint64_t area_id)
{
	uint64_t ticks = dm_stats_get_counter(dms, DM_STATS_WEIGHTED_IO_NSECS,
					      region_id, area_id);

	*qusz = (ticks) ? (double) ticks / (double) dms->interval_ns : 0.0;

	return 1;
}
|
|
|
|
/* Mean wait time per I/O (reads + writes), or zero with no I/O. */
static int _average_wait_time(const struct dm_stats *dms, double *await,
			      uint64_t region_id, uint64_t area_id)
{
	uint64_t ticks, nr_ios;

	ticks = dm_stats_get_counter(dms, DM_STATS_READ_NSECS,
				     region_id, area_id)
		+ dm_stats_get_counter(dms, DM_STATS_WRITE_NSECS,
				       region_id, area_id);
	nr_ios = dm_stats_get_counter(dms, DM_STATS_READS_COUNT,
				      region_id, area_id)
		 + dm_stats_get_counter(dms, DM_STATS_WRITES_COUNT,
					region_id, area_id);

	*await = (nr_ios) ? (double) ticks / (double) nr_ios : 0.0;

	return 1;
}
|
|
|
|
/* Mean wait time per read, or zero with no reads. */
static int _average_rd_wait_time(const struct dm_stats *dms, double *await,
				 uint64_t region_id, uint64_t area_id)
{
	uint64_t ticks, nr_ios;

	ticks = dm_stats_get_counter(dms, DM_STATS_READ_NSECS,
				     region_id, area_id);
	nr_ios = dm_stats_get_counter(dms, DM_STATS_READS_COUNT,
				      region_id, area_id);

	/*
	 * Non-zero read ticks should imply a non-zero read count
	 * (barring a kernel bug): test both before dividing (Coverity).
	 */
	*await = (ticks && nr_ios) ? (double) ticks / (double) nr_ios : 0.0;

	return 1;
}
|
|
|
|
/* Mean wait time per write, or zero with no writes. */
static int _average_wr_wait_time(const struct dm_stats *dms, double *await,
				 uint64_t region_id, uint64_t area_id)
{
	uint64_t ticks, nr_ios;

	ticks = dm_stats_get_counter(dms, DM_STATS_WRITE_NSECS,
				     region_id, area_id);
	nr_ios = dm_stats_get_counter(dms, DM_STATS_WRITES_COUNT,
				      region_id, area_id);

	/*
	 * Non-zero write ticks should imply a non-zero write count
	 * (barring a kernel bug): test both before dividing (Coverity).
	 */
	*await = (ticks && nr_ios) ? (double) ticks / (double) nr_ios : 0.0;

	return 1;
}
|
|
|
|
/* Total I/O operations (reads + writes) per second. */
static int _throughput(const struct dm_stats *dms, double *tput,
		       uint64_t region_id, uint64_t area_id)
{
	uint64_t nr_ios;

	nr_ios = dm_stats_get_counter(dms, DM_STATS_READS_COUNT,
				      region_id, area_id)
		 + dm_stats_get_counter(dms, DM_STATS_WRITES_COUNT,
					region_id, area_id);

	*tput = ((double) NSEC_PER_SEC * (double) nr_ios)
		 / (double) (dms->interval_ns);

	return 1;
}
|
|
|
|
static int _utilization(const struct dm_stats *dms, double *util,
|
|
uint64_t region_id, uint64_t area_id)
|
|
{
|
|
uint64_t io_nsecs, interval_ns = dms->interval_ns;
|
|
|
|
/**
|
|
< |