1
0
mirror of https://gitlab.com/libvirt/libvirt.git synced 2024-12-22 17:34:18 +03:00

Extend virCgroupGetPercpuStats to fill in vcputime too

Currently, virCgroupGetPercpuStats is only used by the LXC driver,
filling out the CPUTIME stats. qemuDomainGetPercpuStats does this
and also fills out VCPUTIME stats.

Extend virCgroupGetPercpuStats to also report VCPUTIME stats if
nvcpupids is non-zero. In the LXC driver, we don't have cpupids.
In the QEMU driver, there is at least one cpupid for a running domain,
so the behavior shouldn't change for QEMU either.

Also rename getSumVcpuPercpuStats to virCgroupGetPercpuVcpuSum.
This commit is contained in:
Ján Tomko 2014-04-03 17:53:43 +02:00
parent 23d2d863b7
commit 897808e74f
5 changed files with 102 additions and 167 deletions

View File

@ -5658,7 +5658,7 @@ lxcDomainGetCPUStats(virDomainPtr dom,
params, nparams);
else
ret = virCgroupGetPercpuStats(priv->cgroup, params,
nparams, start_cpu, ncpus);
nparams, start_cpu, ncpus, 0);
cleanup:
if (vm)
virObjectUnlock(vm);

View File

@ -15964,165 +15964,6 @@ qemuDomainGetMetadata(virDomainPtr dom,
return ret;
}
/* This function gets the sums of cpu time consumed by all vcpus.
* For example, if there are 4 physical cpus, and 2 vcpus in a domain,
* then for each vcpu, the cpuacct.usage_percpu looks like this:
* t0 t1 t2 t3
* and we have 2 groups of such data:
* v\p 0 1 2 3
* 0 t00 t01 t02 t03
* 1 t10 t11 t12 t13
* for each pcpu, the sum is cpu time consumed by all vcpus.
* s0 = t00 + t10
* s1 = t01 + t11
* s2 = t02 + t12
* s3 = t03 + t13
*/
static int
getSumVcpuPercpuStats(virCgroupPtr group,
unsigned int nvcpupids,
unsigned long long *sum_cpu_time,
unsigned int num)
{
int ret = -1;
size_t i;
char *buf = NULL;
virCgroupPtr group_vcpu = NULL;
for (i = 0; i < nvcpupids; i++) {
char *pos;
unsigned long long tmp;
size_t j;
if (virCgroupNewVcpu(group, i, false, &group_vcpu) < 0)
goto cleanup;
if (virCgroupGetCpuacctPercpuUsage(group_vcpu, &buf) < 0)
goto cleanup;
pos = buf;
for (j = 0; j < num; j++) {
if (virStrToLong_ull(pos, &pos, 10, &tmp) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("cpuacct parse error"));
goto cleanup;
}
sum_cpu_time[j] += tmp;
}
virCgroupFree(&group_vcpu);
VIR_FREE(buf);
}
ret = 0;
cleanup:
virCgroupFree(&group_vcpu);
VIR_FREE(buf);
return ret;
}
static int
qemuDomainGetPercpuStats(virCgroupPtr group,
virTypedParameterPtr params,
unsigned int nparams,
int start_cpu,
unsigned int ncpus,
unsigned int nvcpupids)
{
int rv = -1;
size_t i;
int id, max_id;
char *pos;
char *buf = NULL;
unsigned long long *sum_cpu_time = NULL;
unsigned long long *sum_cpu_pos;
unsigned int n = 0;
virTypedParameterPtr ent;
int param_idx;
unsigned long long cpu_time;
/* return the number of supported params */
if (nparams == 0 && ncpus != 0)
return QEMU_NB_PER_CPU_STAT_PARAM;
/* To parse account file, we need to know how many cpus are present. */
max_id = nodeGetCPUCount();
if (max_id < 0)
return rv;
if (ncpus == 0) { /* returns max cpu ID */
rv = max_id;
goto cleanup;
}
if (start_cpu > max_id) {
virReportError(VIR_ERR_INVALID_ARG,
_("start_cpu %d larger than maximum of %d"),
start_cpu, max_id);
goto cleanup;
}
/* we get percpu cputime accounting info. */
if (virCgroupGetCpuacctPercpuUsage(group, &buf))
goto cleanup;
pos = buf;
/* return percpu cputime in index 0 */
param_idx = 0;
/* number of cpus to compute */
if (start_cpu >= max_id - ncpus)
id = max_id - 1;
else
id = start_cpu + ncpus - 1;
for (i = 0; i <= id; i++) {
if (virStrToLong_ull(pos, &pos, 10, &cpu_time) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("cpuacct parse error"));
goto cleanup;
} else {
n++;
}
if (i < start_cpu)
continue;
ent = &params[(i - start_cpu) * nparams + param_idx];
if (virTypedParameterAssign(ent, VIR_DOMAIN_CPU_STATS_CPUTIME,
VIR_TYPED_PARAM_ULLONG, cpu_time) < 0)
goto cleanup;
}
/* return percpu vcputime in index 1 */
if (++param_idx >= nparams) {
rv = nparams;
goto cleanup;
}
if (VIR_ALLOC_N(sum_cpu_time, n) < 0)
goto cleanup;
if (getSumVcpuPercpuStats(group, nvcpupids, sum_cpu_time, n) < 0)
goto cleanup;
sum_cpu_pos = sum_cpu_time;
for (i = 0; i <= id; i++) {
cpu_time = *(sum_cpu_pos++);
if (i < start_cpu)
continue;
if (virTypedParameterAssign(&params[(i - start_cpu) * nparams +
param_idx],
VIR_DOMAIN_CPU_STATS_VCPUTIME,
VIR_TYPED_PARAM_ULLONG,
cpu_time) < 0)
goto cleanup;
}
rv = param_idx + 1;
cleanup:
VIR_FREE(sum_cpu_time);
VIR_FREE(buf);
return rv;
}
static int
qemuDomainGetCPUStats(virDomainPtr domain,
@ -16164,8 +16005,8 @@ qemuDomainGetCPUStats(virDomainPtr domain,
ret = virCgroupGetDomainTotalCpuStats(priv->cgroup,
params, nparams);
else
ret = qemuDomainGetPercpuStats(priv->cgroup, params, nparams,
start_cpu, ncpus, priv->nvcpupids);
ret = virCgroupGetPercpuStats(priv->cgroup, params, nparams,
start_cpu, ncpus, priv->nvcpupids);
cleanup:
if (vm)
virObjectUnlock(vm);

View File

@ -2832,25 +2832,91 @@ virCgroupDenyDevicePath(virCgroupPtr group, const char *path, int perms)
}
/* This function gets the sums of cpu time consumed by all vcpus.
* For example, if there are 4 physical cpus, and 2 vcpus in a domain,
* then for each vcpu, the cpuacct.usage_percpu looks like this:
* t0 t1 t2 t3
* and we have 2 groups of such data:
* v\p 0 1 2 3
* 0 t00 t01 t02 t03
* 1 t10 t11 t12 t13
* for each pcpu, the sum is cpu time consumed by all vcpus.
* s0 = t00 + t10
* s1 = t01 + t11
* s2 = t02 + t12
* s3 = t03 + t13
*/
static int
virCgroupGetPercpuVcpuSum(virCgroupPtr group,
unsigned int nvcpupids,
unsigned long long *sum_cpu_time,
unsigned int num)
{
int ret = -1;
size_t i;
char *buf = NULL;
virCgroupPtr group_vcpu = NULL;
for (i = 0; i < nvcpupids; i++) {
char *pos;
unsigned long long tmp;
size_t j;
if (virCgroupNewVcpu(group, i, false, &group_vcpu) < 0)
goto cleanup;
if (virCgroupGetCpuacctPercpuUsage(group_vcpu, &buf) < 0)
goto cleanup;
pos = buf;
for (j = 0; j < num; j++) {
if (virStrToLong_ull(pos, &pos, 10, &tmp) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("cpuacct parse error"));
goto cleanup;
}
sum_cpu_time[j] += tmp;
}
virCgroupFree(&group_vcpu);
VIR_FREE(buf);
}
ret = 0;
cleanup:
virCgroupFree(&group_vcpu);
VIR_FREE(buf);
return ret;
}
int
virCgroupGetPercpuStats(virCgroupPtr group,
virTypedParameterPtr params,
unsigned int nparams,
int start_cpu,
unsigned int ncpus)
unsigned int ncpus,
unsigned int nvcpupids)
{
int rv = -1;
size_t i;
int id, max_id;
char *pos;
char *buf = NULL;
unsigned long long *sum_cpu_time = NULL;
unsigned long long *sum_cpu_pos;
unsigned int n = 0;
virTypedParameterPtr ent;
int param_idx;
unsigned long long cpu_time;
/* return the number of supported params */
if (nparams == 0 && ncpus != 0)
return CGROUP_NB_PER_CPU_STAT_PARAM;
if (nparams == 0 && ncpus != 0) {
if (nvcpupids == 0)
return CGROUP_NB_PER_CPU_STAT_PARAM;
else
return CGROUP_NB_PER_CPU_STAT_PARAM + 1;
}
/* To parse account file, we need to know how many cpus are present. */
max_id = nodeGetCPUCount();
@ -2888,6 +2954,8 @@ virCgroupGetPercpuStats(virCgroupPtr group,
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("cpuacct parse error"));
goto cleanup;
} else {
n++;
}
if (i < start_cpu)
continue;
@ -2897,9 +2965,34 @@ virCgroupGetPercpuStats(virCgroupPtr group,
goto cleanup;
}
if (nvcpupids == 0 || param_idx + 1 >= nparams)
goto success;
/* return percpu vcputime in index 1 */
param_idx++;
if (VIR_ALLOC_N(sum_cpu_time, n) < 0)
goto cleanup;
if (virCgroupGetPercpuVcpuSum(group, nvcpupids, sum_cpu_time, n) < 0)
goto cleanup;
sum_cpu_pos = sum_cpu_time;
for (i = 0; i <= id; i++) {
cpu_time = *(sum_cpu_pos++);
if (i < start_cpu)
continue;
if (virTypedParameterAssign(&params[(i - start_cpu) * nparams +
param_idx],
VIR_DOMAIN_CPU_STATS_VCPUTIME,
VIR_TYPED_PARAM_ULLONG,
cpu_time) < 0)
goto cleanup;
}
success:
rv = param_idx + 1;
cleanup:
VIR_FREE(sum_cpu_time);
VIR_FREE(buf);
return rv;
}

View File

@ -206,7 +206,8 @@ virCgroupGetPercpuStats(virCgroupPtr group,
virTypedParameterPtr params,
unsigned int nparams,
int start_cpu,
unsigned int ncpus);
unsigned int ncpus,
unsigned int nvcpupids);
int
virCgroupGetDomainTotalCpuStats(virCgroupPtr group,

View File

@ -560,7 +560,7 @@ static int testCgroupGetPercpuStats(const void *args ATTRIBUTE_UNUSED)
if ((rv = virCgroupGetPercpuStats(cgroup,
params,
2, 0, 1)) < 0) {
2, 0, 1, 0)) < 0) {
fprintf(stderr, "Failed call to virCgroupGetPercpuStats for /virtualmachines cgroup: %d\n", -rv);
goto cleanup;
}