mirror of git://git.proxmox.com/git/qemu-server.git
start fixing qmigrate (still not working)
parent f669a2b1fb
commit 1ef75254a1

Changed file: qmigrate (576 lines)
@@ -1,5 +1,8 @@
#!/usr/bin/perl -w

# fixme: kvm > 88 has more migration options and verbose status
# fixme: bwlimit ?

use strict;
use Getopt::Long;
use PVE::SafeSyslog;
@@ -7,33 +10,214 @@ use IO::Select;
use IPC::Open3;
use IPC::Open2;
use PVE::Cluster;
use PVE::INotify;
use PVE::Tools qw(run_command);
use PVE::JSONSchema qw(get_standard_option);
use PVE::QemuServer;
use PVE::Storage;
use POSIX qw(strftime);
use Data::Dumper; # fixme: remove
use PVE::RESTHandler;

# fimxe: adopt for new cluster filestem
use base qw(PVE::RESTHandler);

die "not implemented - fixme!";
die "please run as root\n" if $> != 0;

# fixme: kvm > 88 has more migration options and verbose status
$ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

# blowfish is a fast block cipher, much faster then 3des
my @ssh_opts = ('-c', 'blowfish', '-o', 'BatchMode=yes');
my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
my @scp_cmd = ('/usr/bin/scp', @ssh_opts);
my $qm_cmd = '/usr/sbin/qm';

$ENV{RSYNC_RSH} = join(' ', @ssh_cmd);

my $localnode = PVE::INotify::nodename();

initlog('qmigrate');
PVE::Cluster::cfs_update();

sub print_usage {
my $msg = shift;
# global vars, initialized later
my @rem_ssh;
my $vmid;
my $node;
my $nodeip;

print STDERR "ERROR: $msg\n" if $msg;
print STDERR "USAGE: qmigrate [--online] [--verbose]\n";
print STDERR " destination_address VMID\n";
exit (-1);
my $storecfg = PVE::Storage::config();

my $delayed_interrupt = 0;

$SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = $SIG{PIPE} = sub {
logmsg('err', "received interrupt - delayed");
$delayed_interrupt = 1;
};


# we only use RESTHandler for automatic parameter verification
__PACKAGE__->register_method({
name => 'qmigrate',
path => 'qmigrate',
method => 'POST',
description => "Migrate VMs to other cluster nodes.",
parameters => {
additionalProperties => 0,
properties => {
vmid => get_standard_option('pve-vmid'),
node => get_standard_option('pve-node', {
description => "Target node" }),
online => {
type => 'boolean',
description => "Use online/live migration.",
optional => 1,
},
},
},
returns => { type => 'null'},
code => sub {
my ($param) = @_;

my $errors;

my $starttime = time();

# initialize global variables
$vmid = $param->{vmid};
$node = $param->{node};

die "node is local\n" if $node eq $localnode;

PVE::Cluster::check_cfs_quorum();

PVE::Cluster::check_node_exists($node);

$nodeip = PVE::Cluster::remote_node_ip($node);
@rem_ssh = (@ssh_cmd, "root\@$nodeip");

# lock config during migration
PVE::QemuServer::lock_config($vmid, sub {

eval_int(\&prepare);
die $@ if $@;

my $conf = PVE::QemuServer::load_config($vmid);

PVE::QemuServer::check_lock($conf);

my $running = 0;
if (PVE::QemuServer::check_running($vmid)) {
die "cant migrate running VM without --online\n" if !$param->{online};
$running = 1;
}

my $rhash = {};
eval_int (sub { phase1($conf, $rhash, $running); });
my $err = $@;

if ($err) {
if ($rhash->{clearlock}) {
my $unset = { lock => 1 };
eval { PVE::QemuServer::change_config_nolock($vmid, {}, $unset, 1) };
logmsg('err', $@) if $@;
}
if ($rhash->{volumes}) {
foreach my $volid (@{$rhash->{volumes}}) {
logmsg('err', "found stale volume copy '$volid' on node '$node'");
}
}

die $err;
}

# vm is now owned by other node
my $volids = $rhash->{volumes};

if ($running) {

$rhash = {};
eval_int(sub { phase2($conf, $rhash); });
my $err = $@;

# always kill tunnel
if ($rhash->{tunnel}) {
eval_int(sub { finish_tunnel($rhash->{tunnel}) });
if ($@) {
logmsg('err', "stopping tunnel failed - $@");
$errors = 1;
}
}

# always stop local VM - no interrupts possible
eval { PVE::QemuServer::vm_stop($vmid, 1); };
if ($@) {
logmsg('err', "stopping vm failed - $@");
$errors = 1;
}

if ($err) {
$errors = 1;
logmsg('err', "online migrate failure - $err");
}
}

# finalize -- clear migrate lock
eval_int(sub {
my $cmd = [ @rem_ssh, $qm_cmd, 'unlock', $vmid ];
run_command($cmd);
});
if ($@) {
logmsg('err', "failed to clear migrate lock - $@");
$errors = 1;
}

# destroy local copies
foreach my $volid (@$volids) {
eval_int(sub { PVE::Storage::vdisk_free($storecfg, $volid); });
my $err = $@;

if ($err) {
logmsg('err', "removing local copy of '$volid' failed - $err");
$errors = 1;

last if $err =~ /^interrupted by signal$/;
}
}
});

my $err = $@;

my $delay = time() - $starttime;
my $mins = int($delay/60);
my $secs = $delay - $mins*60;
my $hours = int($mins/60);
$mins = $mins - $hours*60;

my $duration = sprintf "%02d:%02d:%02d", $hours, $mins, $secs;

if ($err) {
logmsg('err', $err) if $err;
logmsg('info', "migration aborted");
exit(-1);
}

if ($errors) {
logmsg('info', "migration finished with problems (duration $duration)");
exit(-1);
}

logmsg('info', "migration finished successfuly (duration $duration)");

return undef;
}});

if ((scalar (@ARGV) == 0) ||
(scalar (@ARGV) == 1 && $ARGV[0] eq 'help')) {
print __PACKAGE__->usage_str('qmigrate', '', ['node', 'vmid'], {}, 'long');
} else {
__PACKAGE__->cli_handler('qmigrate', 'qmigrate', \@ARGV, ['node', 'vmid']);
}


# fixme: bwlimit ?

my $opt_online;
my $opt_verbose;
exit(0);

sub logmsg {
my ($level, $msg) = @_;
@@ -42,9 +226,9 @@ sub logmsg {

return if !$msg;

my $tstr = strftime ("%b %d %H:%M:%S", localtime);
my $tstr = strftime("%b %d %H:%M:%S", localtime);

syslog ($level, $msg);
syslog($level, $msg);

foreach my $line (split (/\n/, $msg)) {
print STDOUT "$tstr $line\n";
@@ -52,53 +236,18 @@ sub logmsg {
\*STDOUT->flush();
}

if (!GetOptions ('online' => \$opt_online,
'verbose' => \$opt_verbose)) {
print_usage ();
}

if (scalar (@ARGV) != 2) {
print_usage ();
}

my $host = shift;
my $vmid = shift;

# blowfish is a fast block cipher, much faster then 3des
my @ssh_opts = ('-c', 'blowfish', '-o', 'BatchMode=yes');
my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
my @rem_ssh = (@ssh_cmd, "root\@$host");
my @scp_cmd = ('/usr/bin/scp', @ssh_opts);
my $qm_cmd = '/usr/sbin/qm';

$ENV{RSYNC_RSH} = join (' ', @ssh_cmd);

logmsg ('err', "illegal VMID") if $vmid !~ m/^\d+$/;
$vmid = int ($vmid); # remove leading zeros

my $storecfg = PVE::Storage::config();

my $conffile = PVE::QemuServer::config_file ($vmid);

my $delayed_interrupt = 0;

$SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = $SIG{PIPE} = sub {
logmsg ('err', "received interrupt - delayed");
$delayed_interrupt = 1;
};

sub eval_int {
my ($func) = @_;

eval {
local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub {
$delayed_interrupt = 0;
logmsg ('err', "received interrupt");
logmsg('err', "received interrupt");
die "interrupted by signal\n";
};
local $SIG{PIPE} = sub {
$delayed_interrupt = 0;
logmsg ('err', "received broken pipe interrupt");
logmsg('err', "received broken pipe interrupt");
die "interrupted by signal\n";
};

@@ -113,41 +262,34 @@ sub eval_int {

sub prepare {

die "VM $vmid does not exist\n" if ! -f $conffile;
my $conffile = PVE::QemuServer::config_file($vmid);
die "VM $vmid does not exist on this node\n" if ! -f $conffile;

# test ssh connection
my $cmd = [ @rem_ssh, '/bin/true' ];
eval { PVE::Storage::run_command ($cmd); };
eval { run_command($cmd); };
die "Can't connect to destination address using public key\n" if $@;

# test if VM already exists
$cmd = [ @rem_ssh, $qm_cmd, 'status', $vmid ];
my $stat = '';
eval {
PVE::Storage::run_command ($cmd, outfunc => sub { $stat .= shift; });
};
die "can't query VM status on host '$host'\n" if $@;

die "VM $vmid already exists on destination host\n" if $stat !~ m/^unknown$/;
}

sub sync_disks {
my ($conf, $rhash, $running) = @_;

logmsg ('info', "copying disk images");
logmsg('info', "copying disk images");

my $res = [];

die "implement me";

eval {

my $volhash = {};

# get list from PVE::Storage (for unused volumes)
my $dl = PVE::Storage::vdisk_list ($storecfg, undef, $vmid);
PVE::Storage::foreach_volid ($dl, sub {
my $dl = PVE::Storage::vdisk_list($storecfg, undef, $vmid);
PVE::Storage::foreach_volid($dl, sub {
my ($volid, $sid, $volname) = @_;

my $scfg = PVE::Storage::storage_config ($storecfg, $sid);
my $scfg = PVE::Storage::storage_config($storecfg, $sid);

return if $scfg->{shared};

@@ -160,22 +302,22 @@ sub sync_disks {
PVE::QemuServer::foreach_drive($conf, sub {
my ($ds, $drive) = @_;

return if PVE::QemuServer::drive_is_cdrom ($drive);
return if PVE::QemuServer::drive_is_cdrom($drive);

my $volid = $drive->{file};

return if !$volid;
die "cant migrate local file/device '$volid'\n" if $volid =~ m|^/|;

my ($sid, $volname) = PVE::Storage::parse_volume_id ($volid);
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

my $scfg = PVE::Storage::storage_config ($storecfg, $sid);
my $scfg = PVE::Storage::storage_config($storecfg, $sid);

return if $scfg->{shared};

$sharedvm = 0;

my ($path, $owner) = PVE::Storage::path ($storecfg, $volid);
my ($path, $owner) = PVE::Storage::path($storecfg, $volid);

die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
if !$owner || ($owner != $vmid);
@@ -189,45 +331,122 @@ sub sync_disks {

# do some checks first
foreach my $volid (keys %$volhash) {
my ($sid, $volname) = PVE::Storage::parse_volume_id ($volid);
my $scfg = PVE::Storage::storage_config ($storecfg, $sid);
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($storecfg, $sid);

die "can't migrate '$volid' - storagy type '$scfg->{type}' not supported\n"
if $scfg->{type} ne 'dir';
}

foreach my $volid (keys %$volhash) {
my ($sid, $volname) = PVE::Storage::parse_volume_id ($volid);
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
push @{$rhash->{volumes}}, $volid;
PVE::Storage::storage_migrate ($storecfg, $volid, $host, $sid);
PVE::Storage::storage_migrate($storecfg, $volid, $nodeip, $sid);
}

};
die "Failed to sync data - $@" if $@;
}

sub fork_command_pipe {
my ($cmd) = @_;

my $reader = IO::File->new();
my $writer = IO::File->new();

my $orig_pid = $$;

my $cpid;

eval { $cpid = open2($reader, $writer, @$cmd); };

my $err = $@;

# catch exec errors
if ($orig_pid != $$) {
logmsg('err', "can't fork command pipe\n");
POSIX::_exit(1);
kill('KILL', $$);
}

die $err if $err;

return { writer => $writer, reader => $reader, pid => $cpid };
}

sub finish_command_pipe {
my $cmdpipe = shift;

my $writer = $cmdpipe->{writer};
my $reader = $cmdpipe->{reader};

$writer->close();
$reader->close();

my $cpid = $cmdpipe->{pid};

kill(15, $cpid) if kill(0, $cpid);

waitpid($cpid, 0);
}
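
For illustration only, a minimal usage sketch of the two pipe helpers above, mirroring the fork_tunnel()/finish_tunnel() pattern further down; the shell command and the "ready" banner are hypothetical, and the sketch relies on the script's existing imports (IO::File, IPC::Open2):

# hypothetical child: prints a banner, then consumes stdin until EOF
my $pipe = fork_command_pipe(['sh', '-c', 'echo ready; cat >/dev/null']);

my $reader = $pipe->{reader};
my $banner = <$reader>;            # analogous to reading the mtunnel "helo"
die "unexpected reply '$banner'\n" if $banner !~ m/^ready$/;

print {$pipe->{writer}} "quit\n";  # send a command to the child
$pipe->{writer}->flush();

finish_command_pipe($pipe);        # close both ends, TERM the child, reap it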

sub run_with_timeout {
my ($timeout, $code, @param) = @_;

die "got timeout\n" if $timeout <= 0;

my $prev_alarm;

my $sigcount = 0;

my $res;

eval {
local $SIG{ALRM} = sub { $sigcount++; die "got timeout\n"; };
local $SIG{PIPE} = sub { $sigcount++; die "broken pipe\n" };
local $SIG{__DIE__}; # see SA bug 4631

$prev_alarm = alarm($timeout);

$res = &$code(@param);

alarm(0); # avoid race conditions
};

my $err = $@;

alarm($prev_alarm) if defined($prev_alarm);

die "unknown error" if $sigcount && !$err; # seems to happen sometimes

die $err if $err;

return $res;
}
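
For illustration only (the $reader handle here is hypothetical): run_with_timeout() is the generic building block used below to bound blocking operations, for example giving a read at most 60 seconds, exactly like the "helo" read in fork_tunnel():

my $line;
eval {
    run_with_timeout(60, sub { $line = <$reader>; });
};
logmsg('err', "read failed or timed out - $@") if $@;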

sub fork_tunnel {
my ($remhost, $lport, $rport) = @_;
my ($lport, $rport) = @_;

my $cmd = [@ssh_cmd, '-o', 'BatchMode=yes',
'-L', "$lport:localhost:$rport", $remhost,
'-L', "$lport:localhost:$rport", $nodeip,
'qm', 'mtunnel' ];

my $tunnel = PVE::Storage::fork_command_pipe ($cmd);
my $tunnel = fork_command_pipe($cmd);

my $reader = $tunnel->{reader};

my $helo;
eval {
PVE::Storage::run_with_timeout (60, sub { $helo = <$reader>; });
run_with_timeout(60, sub { $helo = <$reader>; });
die "no reply\n" if !$helo;
die "no quorum on target node\n" if $helo =~ m/^no quorum$/;
die "got strange reply from mtunnel ('$helo')\n"
if $helo !~ m/^tunnel online$/;
};
my $err = $@;

if ($err) {
PVE::Storage::finish_command_pipe ($tunnel);
finish_command_pipe($tunnel);
die "can't open migration tunnel - $err";
}
return $tunnel;
@@ -239,14 +458,14 @@ sub finish_tunnel {
my $writer = $tunnel->{writer};

eval {
PVE::Storage::run_with_timeout (30, sub {
run_with_timeout(30, sub {
print $writer "quit\n";
$writer->flush();
});
};
my $err = $@;

PVE::Storage::finish_command_pipe ($tunnel);
finish_command_pipe($tunnel);

die $err if $err;
}
@@ -254,19 +473,13 @@ sub finish_tunnel {
sub phase1 {
my ($conf, $rhash, $running) = @_;

logmsg ('info', "starting migration of VM $vmid to host '$host'");
logmsg('info', "starting migration of VM $vmid to node '$node' ($nodeip)");

my $loc_res = 0;
$loc_res = 1 if $conf->{hostusb};
$loc_res = 1 if $conf->{hostpci};
$loc_res = 1 if $conf->{serial};
$loc_res = 1 if $conf->{parallel};

if ($loc_res) {
if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
if ($running) {
die "can't migrate VM which uses local devices\n";
} else {
logmsg ('info', "migrating VM which uses local devices");
logmsg('info', "migrating VM which uses local devices");
}
}

@@ -274,30 +487,29 @@ sub phase1 {
$rhash->{clearlock} = 1;

my $settings = { lock => 'migrate' };
PVE::QemuServer::change_config_nolock ($vmid, $settings, {}, 1);
PVE::QemuServer::change_config_nolock($vmid, $settings, {}, 1);

# copy config to remote host
eval {
my $cmd = [ @scp_cmd, $conffile, "root\@$host:$conffile"];
PVE::Storage::run_command ($cmd);
$rhash->{conffile} = 1;
};
die "Failed to copy config file - $@" if $@;
sync_disks($conf, $rhash, $running);

sync_disks ($conf, $rhash, $running);
# move config to remote node
my $conffile = PVE::QemuServer::config_file($vmid, $localnode);
my $newconffile = PVE::QemuServer::config_file($vmid, $node);

die "Failed to move config to node '$node' - rename failed: $!\n"
if !rename($conffile, $newconffile);
};

sub phase2 {
my ($conf, $rhash) = shift;

logmsg ('info', "starting VM on remote host '$host'");
logmsg('info', "starting VM on remote node '$node'");

my $rport;

## start on remote host
## start on remote node
my $cmd = [@rem_ssh, $qm_cmd, '--skiplock', 'start', $vmid, '--incoming', 'tcp'];

PVE::Storage::run_command ($cmd, outfunc => sub {
run_command($cmd, outfunc => sub {
my $line = shift;

if ($line =~ m/^migration listens on port (\d+)$/) {
@@ -307,22 +519,23 @@ sub phase2 {

die "unable to detect remote migration port\n" if !$rport;

logmsg ('info', "starting migration tunnel");
## create tunnel to remote port
my $lport = PVE::QemuServer::next_migrate_port ();
$rhash->{tunnel} = fork_tunnel ($host, $lport, $rport);
logmsg('info', "starting migration tunnel");

logmsg ('info', "starting online/live migration");
## create tunnel to remote port
my $lport = PVE::QemuServer::next_migrate_port();
$rhash->{tunnel} = fork_tunnel($lport, $rport);

logmsg('info', "starting online/live migration");
# start migration

my $start = time();

PVE::QemuServer::vm_monitor_command ($vmid, "migrate -d \"tcp:localhost:$lport\"");
PVE::QemuServer::vm_monitor_command($vmid, "migrate -d \"tcp:localhost:$lport\"");

my $lstat = '';
while (1) {
sleep (2);
my $stat = PVE::QemuServer::vm_monitor_command ($vmid, "info migrate", 1);
my $stat = PVE::QemuServer::vm_monitor_command($vmid, "info migrate", 1);
if ($stat =~ m/^Migration status: (active|completed|failed|cancelled)$/im) {
my $ms = $1;

@@ -333,10 +546,10 @@ sub phase2 {
$rem = $1 if $stat =~ m/^remaining ram: (\d+) kbytes$/im;
$total = $1 if $stat =~ m/^total ram: (\d+) kbytes$/im;

logmsg ('info', "migration status: $ms (transferred ${trans}KB, " .
logmsg('info', "migration status: $ms (transferred ${trans}KB, " .
"remaining ${rem}KB), total ${total}KB)");
} else {
logmsg ('info', "migration status: $ms");
logmsg('info', "migration status: $ms");
}
}

@@ -344,7 +557,7 @@ sub phase2 {
my $delay = time() - $start;
if ($delay > 0) {
my $mbps = sprintf "%.2f", $conf->{memory}/$delay;
logmsg ('info', "migration speed: $mbps MB/s");
logmsg('info', "migration speed: $mbps MB/s");
}
}

@@ -360,145 +573,24 @@ sub phase2 {
};
}

my $errors;

my $starttime = time();

# lock config during migration
PVE::QemuServer::lock_config ($vmid, sub {

eval_int (\&prepare);
die $@ if $@;

my $conf = PVE::QemuServer::load_config($vmid);

PVE::QemuServer::check_lock ($conf);

my $running = 0;
if (PVE::QemuServer::check_running ($vmid)) {
die "cant migrate running VM without --online\n" if !$opt_online;
$running = 1;
}

my $rhash = {};
eval_int (sub { phase1 ($conf, $rhash, $running); });
my $err = $@;

if ($err) {
if ($rhash->{clearlock}) {
my $unset = { lock => 1 };
eval { PVE::QemuServer::change_config_nolock ($vmid, {}, $unset, 1) };
logmsg ('err', $@) if $@;
}
if ($rhash->{conffile}) {
my $cmd = [ @rem_ssh, '/bin/rm', '-f', $conffile ];
eval { PVE::Storage::run_command ($cmd); };
logmsg ('err', $@) if $@;
}
if ($rhash->{volumes}) {
foreach my $volid (@{$rhash->{volumes}}) {
logmsg ('err', "found stale volume copy '$volid' on host '$host'");
}
}

die $err;
}

# vm is now owned by other host
my $volids = $rhash->{volumes};

if ($running) {

$rhash = {};
eval_int (sub { phase2 ($conf, $rhash); });
my $err = $@;

# always kill tunnel
if ($rhash->{tunnel}) {
eval_int (sub { finish_tunnel ($rhash->{tunnel}) });
if ($@) {
logmsg ('err', "stopping tunnel failed - $@");
$errors = 1;
}
}

# always stop local VM - no interrupts possible
eval { PVE::QemuServer::vm_stop ($vmid, 1); };
if ($@) {
logmsg ('err', "stopping vm failed - $@");
$errors = 1;
}

if ($err) {
$errors = 1;
logmsg ('err', "online migrate failure - $err");
}
}

# finalize -- clear migrate lock
eval_int (sub {
my $cmd = [@rem_ssh, $qm_cmd, 'unlock', $vmid ];
PVE::Storage::run_command ($cmd);
});
if ($@) {
logmsg ('err', "failed to clear migrate lock - $@");
$errors = 1;
}

unlink $conffile;

# destroy local copies
foreach my $volid (@$volids) {
eval_int (sub { PVE::Storage::vdisk_free ($storecfg, $volid); });
my $err = $@;

if ($err) {
logmsg ('err', "removing local copy of '$volid' failed - $err");
$errors = 1;

last if $err =~ /^interrupted by signal$/;
}
}
});

my $err = $@;

my $delay = time () - $starttime;
my $mins = int ($delay/60);
my $secs = $delay - $mins*60;
my $hours = int ($mins/60);
$mins = $mins - $hours*60;

my $duration = sprintf "%02d:%02d:%02d", $hours, $mins, $secs;

if ($err) {
logmsg ('err', $err) if $err;
logmsg ('info', "migration aborted");
exit (-1);
}

if ($errors) {
logmsg ('info', "migration finished with problems (duration $duration)");
exit (-1);
}

logmsg ('info', "migration finished successfuly (duration $duration)");

exit (0);
exit(0);

__END__

=head1 NAME

qmigrate - utility for VM migration between hardware nodes (kvm/qemu)
qmigrate - utility for VM migration between cluster nodes (kvm/qemu)

=head1 SYNOPSIS

qmigrate [--online] [--verbose] destination_address VMID
qmigrate help

qmigrate [--online] target_node VMID

=head1 DESCRIPTION

no info available.
Migrate VMs to other cluster nodes.
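
A short usage example following the new synopsis (the node name and VMID below are placeholders):

 # offline migration of VM 101 to node 'node2'
 qmigrate node2 101

 # live migration of a running VM
 qmigrate --online node2 101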