
qemu: Memory pre-pinning support for RDMA migration

RDMA live migration requires registering memory with the hardware, and
thus QEMU offers a new 'capability' to pre-register / mlock() the guest
memory in advance for higher RDMA performance before the migration
begins. This capability is disabled by default, which means QEMU will
register the memory with the hardware on an on-demand basis.

This patch exposes this capability with the following example usage:

virsh migrate --live --rdma-pin-all --migrateuri rdma://hostname domain qemu+ssh://hostname/system

Signed-off-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
This commit is contained in:
Michael R. Hines 2014-01-13 14:28:12 +08:00 committed by Jiri Denemark
parent ed22a47434
commit 9cc1586d2b
4 changed files with 59 additions and 1 deletion


@@ -1224,6 +1224,7 @@ typedef enum {
     VIR_MIGRATE_COMPRESSED = (1 << 11), /* compress data during migration */
     VIR_MIGRATE_ABORT_ON_ERROR = (1 << 12), /* abort migration on I/O errors happened during migration */
     VIR_MIGRATE_AUTO_CONVERGE = (1 << 13), /* force convergence */
+    VIR_MIGRATE_RDMA_PIN_ALL = (1 << 14), /* RDMA memory pinning */
 } virDomainMigrateFlags;
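
From the C API the new flag is simply OR-ed into the migration flags, mirroring the virsh example in the commit message. A minimal sketch, assuming peer-to-peer migration; the hostnames and the domain name are the placeholders taken from the example above:

#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "domain") : NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
    int ret = 1;

    if (!dom)
        goto cleanup;

    /* Equivalent of --migrateuri rdma://hostname */
    if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                VIR_MIGRATE_PARAM_URI, "rdma://hostname") < 0)
        goto cleanup;

    /* --live --rdma-pin-all; peer-to-peer, so only the destination
     * libvirtd URI is passed here */
    if (virDomainMigrateToURI3(dom, "qemu+ssh://hostname/system",
                               params, nparams,
                               VIR_MIGRATE_LIVE | VIR_MIGRATE_PEER2PEER |
                               VIR_MIGRATE_RDMA_PIN_ALL) < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    virTypedParamsFree(params, nparams);
    if (dom)
        virDomainFree(dom);
    if (conn)
        virConnectClose(conn);
    return ret;
}

Link against libvirt (e.g. gcc example.c -lvirt).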


@@ -1873,6 +1873,46 @@ qemuMigrationSetAutoConverge(virQEMUDriverPtr driver,
 }
 
+static int
+qemuMigrationSetPinAll(virQEMUDriverPtr driver,
+                       virDomainObjPtr vm,
+                       qemuDomainAsyncJob job)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    int ret;
+
+    if (qemuDomainObjEnterMonitorAsync(driver, vm, job) < 0)
+        return -1;
+
+    ret = qemuMonitorGetMigrationCapability(
+                priv->mon,
+                QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL);
+
+    if (ret < 0) {
+        goto cleanup;
+    } else if (ret == 0) {
+        if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
+            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                           _("rdma pinning migration is not supported by "
+                             "target QEMU binary"));
+        } else {
+            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                           _("rdma pinning migration is not supported by "
+                             "source QEMU binary"));
+        }
+        ret = -1;
+        goto cleanup;
+    }
+
+    ret = qemuMonitorSetMigrationCapability(
+                priv->mon,
+                QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL);
+
+ cleanup:
+    qemuDomainObjExitMonitor(driver, vm);
+    return ret;
+}
+
 static int
 qemuMigrationWaitForSpice(virQEMUDriverPtr driver,
                           virDomainObjPtr vm)
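
The two monitor helpers above correspond to QEMU's query-migrate-capabilities and migrate-set-capabilities QMP commands, using the capability name rdma-pin-all. For debugging, the capability list of a running domain can also be inspected out of band through libvirt's unsupported QEMU command passthrough (this taints the domain); a rough sketch, assuming a domain named 'domain':

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>
#include <libvirt/libvirt-qemu.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "domain") : NULL;
    char *reply = NULL;
    int ret = 1;

    if (!dom)
        goto cleanup;

    /* On a new enough QEMU the raw QMP reply lists entries such as
     * {"capability": "rdma-pin-all", "state": false} */
    if (virDomainQemuMonitorCommand(dom,
                                    "{\"execute\": \"query-migrate-capabilities\"}",
                                    &reply,
                                    VIR_DOMAIN_QEMU_MONITOR_COMMAND_DEFAULT) < 0)
        goto cleanup;

    printf("%s\n", reply);
    ret = 0;

 cleanup:
    free(reply);
    if (dom)
        virDomainFree(dom);
    if (conn)
        virConnectClose(conn);
    return ret;
}

Link against both libraries (-lvirt -lvirt-qemu).
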
@@ -2709,6 +2749,10 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
         goto stop;
     }
 
+    if (flags & VIR_MIGRATE_RDMA_PIN_ALL &&
+        qemuMigrationSetPinAll(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+        goto stop;
+
     if (mig->lockState) {
         VIR_DEBUG("Received lockstate %s", mig->lockState);
         VIR_FREE(priv->lockState);
@@ -3532,6 +3576,11 @@ qemuMigrationRun(virQEMUDriverPtr driver,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;
 
+    if (flags & VIR_MIGRATE_RDMA_PIN_ALL &&
+        qemuMigrationSetPinAll(driver, vm,
+                               QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+        goto cleanup;
+
     if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                        QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;


@@ -40,7 +40,8 @@
      VIR_MIGRATE_OFFLINE | \
      VIR_MIGRATE_COMPRESSED | \
      VIR_MIGRATE_ABORT_ON_ERROR | \
-     VIR_MIGRATE_AUTO_CONVERGE)
+     VIR_MIGRATE_AUTO_CONVERGE | \
+     VIR_MIGRATE_RDMA_PIN_ALL)
 
 /* All supported migration parameters and their types. */
 # define QEMU_MIGRATION_PARAMETERS \


@@ -9212,6 +9212,10 @@ static const vshCmdOptDef opts_migrate[] = {
      .type = VSH_OT_BOOL,
      .help = N_("force convergence during live migration")
     },
+    {.name = "rdma-pin-all",
+     .type = VSH_OT_BOOL,
+     .help = N_("support memory pinning during RDMA live migration")
+    },
     {.name = "abort-on-error",
      .type = VSH_OT_BOOL,
      .help = N_("abort on soft errors during migration")
@@ -9360,6 +9364,9 @@ doMigrate(void *opaque)
     if (vshCommandOptBool(cmd, "auto-converge"))
         flags |= VIR_MIGRATE_AUTO_CONVERGE;
 
+    if (vshCommandOptBool(cmd, "rdma-pin-all"))
+        flags |= VIR_MIGRATE_RDMA_PIN_ALL;
+
     if (vshCommandOptBool(cmd, "offline")) {
         flags |= VIR_MIGRATE_OFFLINE;
     }