mirror of
https://gitlab.com/libvirt/libvirt.git
synced 2025-03-20 06:50:22 +03:00
qemu: Migrate memory on numatune change
We've never set the cpuset.memory_migrate value to anything, keeping it on default. However, we allow changing cpuset.mems on a live domain. That setting, however, doesn't have any consequence on a domain unless it's going to allocate new memory. I managed to make 'virsh numatune' move all the memory to any node I wanted, even without disabling libnuma's numa_set_membind(), so this should be safe to use with it as well. Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1198497 Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
This commit is contained in:
parent
ba1dfc5b6a
commit
3a0e5b0c20
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* qemu_cgroup.c: QEMU cgroup management
|
||||
*
|
||||
* Copyright (C) 2006-2014 Red Hat, Inc.
|
||||
* Copyright (C) 2006-2015 Red Hat, Inc.
|
||||
* Copyright (C) 2006 Daniel P. Berrange
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
@@ -652,6 +652,9 @@ qemuSetupCpusetCgroup(virDomainObjPtr vm,
|
||||
if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
|
||||
return 0;
|
||||
|
||||
if (virCgroupSetCpusetMemoryMigrate(priv->cgroup, true) < 0)
|
||||
return -1;
|
||||
|
||||
if (vm->def->cpumask ||
|
||||
(vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)) {
|
||||
|
||||
@@ -792,9 +795,12 @@ static void
|
||||
qemuRestoreCgroupState(virDomainObjPtr vm)
|
||||
{
|
||||
char *mem_mask = NULL;
|
||||
char *nodeset = NULL;
|
||||
int empty = -1;
|
||||
qemuDomainObjPrivatePtr priv = vm->privateData;
|
||||
size_t i = 0;
|
||||
virBitmapPtr all_nodes;
|
||||
virCgroupPtr cgroup_temp = NULL;
|
||||
|
||||
if (!(all_nodes = virNumaGetHostNodeset()))
|
||||
goto error;
|
||||
@@ -809,9 +815,37 @@ qemuRestoreCgroupState(virDomainObjPtr vm)
|
||||
if (virCgroupSetCpusetMems(priv->cgroup, mem_mask) < 0)
|
||||
goto error;
|
||||
|
||||
for (i = 0; i < priv->nvcpupids; i++) {
|
||||
if (virCgroupNewVcpu(priv->cgroup, i, false, &cgroup_temp) < 0 ||
|
||||
virCgroupSetCpusetMemoryMigrate(cgroup_temp, true) < 0 ||
|
||||
virCgroupGetCpusetMems(cgroup_temp, &nodeset) < 0 ||
|
||||
virCgroupSetCpusetMems(cgroup_temp, nodeset) < 0)
|
||||
goto cleanup;
|
||||
|
||||
virCgroupFree(&cgroup_temp);
|
||||
}
|
||||
|
||||
for (i = 0; i < priv->niothreadpids; i++) {
|
||||
if (virCgroupNewIOThread(priv->cgroup, i + 1, false, &cgroup_temp) < 0 ||
|
||||
virCgroupSetCpusetMemoryMigrate(cgroup_temp, true) < 0 ||
|
||||
virCgroupGetCpusetMems(cgroup_temp, &nodeset) < 0 ||
|
||||
virCgroupSetCpusetMems(cgroup_temp, nodeset) < 0)
|
||||
goto cleanup;
|
||||
|
||||
virCgroupFree(&cgroup_temp);
|
||||
}
|
||||
|
||||
if (virCgroupNewEmulator(priv->cgroup, false, &cgroup_temp) < 0 ||
|
||||
virCgroupSetCpusetMemoryMigrate(cgroup_temp, true) < 0 ||
|
||||
virCgroupGetCpusetMems(cgroup_temp, &nodeset) < 0 ||
|
||||
virCgroupSetCpusetMems(cgroup_temp, nodeset) < 0)
|
||||
goto cleanup;
|
||||
|
||||
cleanup:
|
||||
VIR_FREE(mem_mask);
|
||||
VIR_FREE(nodeset);
|
||||
virBitmapFree(all_nodes);
|
||||
virCgroupFree(&cgroup_temp);
|
||||
return;
|
||||
|
||||
error:
|
||||
|
Loading…
x
Reference in New Issue
Block a user