remove OpenVZ related code

There is still no OpenVZ for kernel 3.10 or newer, so we remove that code now.
This commit is contained in:
Dietmar Maurer 2015-03-27 13:10:51 +01:00
parent 7e5cb2f0b8
commit dafb62468e
13 changed files with 7 additions and 4063 deletions

View File

@ -12,8 +12,7 @@ PERLSOURCE = \
Pool.pm \
Tasks.pm \
Network.pm \
Services.pm \
OpenVZ.pm
Services.pm
all:

View File

@ -20,7 +20,6 @@ use PVE::JSONSchema qw(get_standard_option);
use PVE::AccessControl;
use PVE::Storage;
use PVE::Firewall;
use PVE::OpenVZ;
use PVE::APLInfo;
use PVE::HA::Config;
use PVE::QemuServer;
@ -31,7 +30,6 @@ use PVE::API2::Tasks;
use PVE::API2::Storage::Scan;
use PVE::API2::Storage::Status;
use PVE::API2::Qemu;
use PVE::API2::OpenVZ;
use PVE::API2::VZDump;
use PVE::API2::APT;
use PVE::API2::Ceph;
@ -50,11 +48,6 @@ __PACKAGE__->register_method ({
path => 'ceph',
});
__PACKAGE__->register_method ({
subclass => "PVE::API2::OpenVZ",
path => 'openvz',
});
__PACKAGE__->register_method ({
subclass => "PVE::API2::VZDump",
path => 'vzdump',
@ -142,7 +135,6 @@ __PACKAGE__->register_method ({
{ name => 'scan' },
{ name => 'storage' },
{ name => 'qemu' },
{ name => 'openvz' },
{ name => 'vzdump' },
{ name => 'ubcfailcnt' },
{ name => 'network' },
@ -183,47 +175,6 @@ __PACKAGE__->register_method ({
return PVE::pvecfg::version_info();
}});
__PACKAGE__->register_method({
name => 'beancounters_failcnt',
path => 'ubcfailcnt',
permissions => {
check => ['perm', '/nodes/{node}', [ 'Sys.Audit' ]],
},
method => 'GET',
proxyto => 'node',
protected => 1, # openvz /proc entries are only readable by root
description => "Get user_beancounters failcnt for all active containers.",
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
},
},
returns => {
type => 'array',
items => {
type => "object",
properties => {
id => { type => 'string' },
failcnt => { type => 'number' },
},
},
},
code => sub {
my ($param) = @_;
my $ubchash = PVE::OpenVZ::read_user_beancounters();
my $res = [];
foreach my $vmid (keys %$ubchash) {
next if !$vmid;
push @$res, { id => $vmid, failcnt => $ubchash->{$vmid}->{failcntsum} };
}
return $res;
}});
__PACKAGE__->register_method({
name => 'status',
path => 'status',
@ -1119,110 +1070,6 @@ __PACKAGE__->register_method({
return $res;
}});
__PACKAGE__->register_method({
name => 'apl_download',
path => 'aplinfo',
method => 'POST',
permissions => {
check => ['perm', '/storage/{storage}', ['Datastore.AllocateTemplate']],
},
description => "Download appliance templates.",
proxyto => 'node',
protected => 1,
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
storage => get_standard_option('pve-storage-id'),
template => { type => 'string', maxLength => 255 },
},
},
returns => { type => "string" },
code => sub {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
my $user = $rpcenv->get_user();
my $node = $param->{node};
my $list = PVE::APLInfo::load_data();
my $template = $param->{template};
my $pd = $list->{all}->{$template};
raise_param_exc({ template => "no such template"}) if !$pd;
my $cfg = cfs_read_file("storage.cfg");
my $scfg = PVE::Storage::storage_check_enabled($cfg, $param->{storage}, $node);
die "cannot download to storage type '$scfg->{type}'"
if !($scfg->{type} eq 'dir' || $scfg->{type} eq 'nfs');
die "unknown template type '$pd->{type}'\n" if $pd->{type} ne 'openvz';
die "storage '$param->{storage}' does not support templates\n"
if !$scfg->{content}->{vztmpl};
my $src = $pd->{location};
my $tmpldir = PVE::Storage::get_vztmpl_dir($cfg, $param->{storage});
my $dest = "$tmpldir/$template";
my $tmpdest = "$tmpldir/${template}.tmp.$$";
my $worker = sub {
my $upid = shift;
print "starting template download from: $src\n";
print "target file: $dest\n";
eval {
if (-f $dest) {
my $md5 = (split (/\s/, `md5sum '$dest'`))[0];
if ($md5 && (lc($md5) eq lc($pd->{md5sum}))) {
print "file already exists $md5 - no need to download\n";
return;
}
}
local %ENV;
my $dccfg = PVE::Cluster::cfs_read_file('datacenter.cfg');
if ($dccfg->{http_proxy}) {
$ENV{http_proxy} = $dccfg->{http_proxy};
}
my @cmd = ('/usr/bin/wget', '--progress=dot:mega', '-O', $tmpdest, $src);
if (system (@cmd) != 0) {
die "download failed - $!\n";
}
my $md5 = (split (/\s/, `md5sum '$tmpdest'`))[0];
if (!$md5 || (lc($md5) ne lc($pd->{md5sum}))) {
die "wrong checksum: $md5 != $pd->{md5sum}\n";
}
if (system ('mv', $tmpdest, $dest) != 0) {
die "unable to save file - $!\n";
}
};
my $err = $@;
unlink $tmpdest;
if ($err) {
print "\n";
die $err if $err;
}
print "download finished\n";
};
return $rpcenv->fork_worker('download', undef, $user, $worker);
}});
my $get_start_stop_list = sub {
my ($nodename, $autostart) = @_;
@ -1239,16 +1086,7 @@ my $get_start_stop_list = sub {
my $bootorder = LONG_MAX;
if ($d->{type} eq 'openvz') {
my $conf = PVE::OpenVZ::load_config($vmid);
return if $autostart && !($conf->{onboot} && $conf->{onboot}->{value});
if ($conf->{bootorder} && defined($conf->{bootorder}->{value})) {
$bootorder = $conf->{bootorder}->{value};
}
$startup = { order => $bootorder };
} elsif ($d->{type} eq 'qemu') {
if ($d->{type} eq 'qemu') {
my $conf = PVE::QemuServer::load_config($vmid);
return if $autostart && !$conf->{onboot};
@ -1331,11 +1169,7 @@ __PACKAGE__->register_method ({
my $default_delay = 0;
my $upid;
if ($d->{type} eq 'openvz') {
return if PVE::OpenVZ::check_running($vmid);
print STDERR "Starting CT $vmid\n";
$upid = PVE::API2::OpenVZ->vm_start({node => $nodename, vmid => $vmid });
} elsif ($d->{type} eq 'qemu') {
if ($d->{type} eq 'qemu') {
$default_delay = 3; # to reduce load
return if PVE::QemuServer::check_running($vmid, 1);
print STDERR "Starting VM $vmid\n";
@ -1360,11 +1194,7 @@ __PACKAGE__->register_method ({
}
}
} else {
if ($d->{type} eq 'openvz') {
print STDERR "Starting CT $vmid failed: $status\n";
} elsif ($d->{type} eq 'qemu') {
print STDERR "Starting VM $vmid failed: status\n";
}
print STDERR "Starting VM $vmid failed: $status\n";
}
};
warn $@ if $@;
@ -1380,13 +1210,7 @@ my $create_stop_worker = sub {
my ($nodename, $type, $vmid, $down_timeout) = @_;
my $upid;
if ($type eq 'openvz') {
return if !PVE::OpenVZ::check_running($vmid);
my $timeout = defined($down_timeout) ? int($down_timeout) : 60;
print STDERR "Stopping CT $vmid (timeout = $timeout seconds)\n";
$upid = PVE::API2::OpenVZ->vm_shutdown({node => $nodename, vmid => $vmid,
timeout => $timeout, forceStop => 1 });
} elsif ($type eq 'qemu') {
if ($type eq 'qemu') {
return if !PVE::QemuServer::check_running($vmid, 1);
my $timeout = defined($down_timeout) ? int($down_timeout) : 60*3;
print STDERR "Stopping VM $vmid (timeout = $timeout seconds)\n";
@ -1475,12 +1299,7 @@ my $create_migrate_worker = sub {
my ($nodename, $type, $vmid, $target) = @_;
my $upid;
if ($type eq 'openvz') {
my $online = PVE::OpenVZ::check_running($vmid) ? 1 : 0;
print STDERR "Migrating CT $vmid\n";
$upid = PVE::API2::OpenVZ->migrate_vm({node => $nodename, vmid => $vmid, target => $target,
online => $online });
} elsif ($type eq 'qemu') {
if ($type eq 'qemu') {
my $online = PVE::QemuServer::check_running($vmid, 1) ? 1 : 0;
print STDERR "Migrating VM $vmid\n";
$upid = PVE::API2::Qemu->migrate_vm({node => $nodename, vmid => $vmid, target => $target,

File diff suppressed because it is too large Load Diff

View File

@ -11,8 +11,6 @@ PERLSOURCE = \
NoVncIndex.pm \
HTTPServer.pm \
REST.pm \
OpenVZ.pm \
OpenVZMigrate.pm \
APLInfo.pm \
AutoBalloon.pm \
CephTools.pm \

File diff suppressed because it is too large Load Diff

View File

@ -1,335 +0,0 @@
# PVE::OpenVZMigrate - migrate OpenVZ containers between cluster nodes.
# Implements the PVE::AbstractMigrate hook interface: prepare() validates
# both nodes, phase1() copies data and config (suspending/dumping a running
# CT for online migration), phase2() restores and resumes it on the target,
# phase3()/phase3_cleanup()/final_cleanup() initialize the target and remove
# leftovers. Remote commands run over ssh via $self->{rem_ssh}.
package PVE::OpenVZMigrate;
use strict;
use warnings;
use PVE::AbstractMigrate;
use File::Basename;
use File::Copy;
use PVE::Tools;
use PVE::INotify;
use PVE::Cluster;
use PVE::Storage;
use PVE::OpenVZ;
use base qw(PVE::AbstractMigrate);
# fixme: lock VM on target node
# Run $code under the per-container lock so no other task touches the CT.
sub lock_vm {
my ($self, $vmid, $code, @param) = @_;
return PVE::OpenVZ::lock_container($vmid, undef, $code, @param);
}
# Validate preconditions (storage visible on both nodes, ssh reachable,
# CPT/RST kernel modules present for online migration) and cache paths in
# $self for later phases. Returns 1 if the container is currently running.
sub prepare {
my ($self, $vmid) = @_;
my $online = $self->{opts}->{online};
$self->{storecfg} = PVE::Storage::config();
# NOTE(review): trailing comma (comma operator) instead of a semicolon on
# the next statement - works, but looks unintentional.
$self->{vzconf} = PVE::OpenVZ::read_global_vz_config(),
# test if the container exists (load_config dies if it does not)
my $conf = $self->{vmconf} = PVE::OpenVZ::load_config($vmid);
my $path = PVE::OpenVZ::get_privatedir($conf, $vmid);
my ($vtype, $volid) = PVE::Storage::path_to_volume_id($self->{storecfg}, $path);
# NOTE(review): 'my' combined with a statement modifier is documented as
# undefined behaviour in perlsyn - should be split into two statements.
my ($storage, $volname) = PVE::Storage::parse_volume_id($volid, 1) if $volid;
die "can't determine assigned storage\n" if !$storage;
# check if storage is available on both nodes
my $scfg = PVE::Storage::storage_check_node($self->{storecfg}, $storage);
PVE::Storage::storage_check_node($self->{storecfg}, $storage, $self->{node});
# we simply use the backup dir to store temporary dump files
# Note: this is on shared storage if the storage is 'shared'
$self->{dumpdir} = PVE::Storage::get_backup_dir($self->{storecfg}, $storage);
PVE::Storage::activate_volumes($self->{storecfg}, [ $volid ]);
$self->{storage} = $storage;
$self->{privatedir} = $path;
$self->{rootdir} = PVE::OpenVZ::get_rootdir($conf, $vmid);
$self->{shared} = $scfg->{shared};
my $running = 0;
if (PVE::OpenVZ::check_running($vmid)) {
die "cant migrate running container without --online\n" if !$online;
$running = 1;
}
# fixme: test if VM uses local resources
# test ssh connection
my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
eval { $self->cmd_quiet($cmd); };
die "Can't connect to destination address using public key\n" if $@;
if ($running) {
# test if OpenVZ is running
$cmd = [ @{$self->{rem_ssh}}, '/etc/init.d/vz status' ];
eval { $self->cmd_quiet($cmd); };
die "OpenVZ is not running on the target machine\n" if $@;
# test if CPT modules are loaded for online migration
die "vzcpt module is not loaded\n" if ! -f '/proc/cpt';
$cmd = [ @{$self->{rem_ssh}}, 'test -f /proc/rst' ];
eval { $self->cmd_quiet($cmd); };
die "vzrst module is not loaded on the target machine\n" if $@;
}
# fixme: do we want to test if IPs exists on target node?
return $running;
}
# Copy container data and config to the target node. For a running CT:
# rsync, then checkpoint-suspend, dump state, rsync a second pass to pick
# up late changes. Also dumps 2nd level disk quota if enabled. Finally the
# CT config file is renamed over to the target node's config path.
sub phase1 {
my ($self, $vmid) = @_;
$self->log('info', "starting migration of CT $self->{vmid} to node '$self->{node}' ($self->{nodeip})");
my $conf = $self->{vmconf};
if ($self->{running}) {
$self->log('info', "container is running - using online migration");
}
my $cmd = [ @{$self->{rem_ssh}}, 'mkdir', '-p', $self->{rootdir} ];
$self->cmd_quiet($cmd, errmsg => "Failed to make container root directory");
my $privatedir = $self->{privatedir};
if (!$self->{shared}) {
$cmd = [ @{$self->{rem_ssh}}, 'mkdir', '-p', $privatedir ];
$self->cmd_quiet($cmd, errmsg => "Failed to make container private directory");
# remember for phase1_cleanup() so a failed migration can roll back
$self->{undo_private} = $privatedir;
$self->log('info', "starting rsync phase 1");
my $basedir = dirname($privatedir);
$cmd = [ @{$self->{rsync_cmd}}, '--sparse', $privatedir, "root\@$self->{nodeip}:$basedir" ];
$self->cmd($cmd, errmsg => "Failed to sync container private area");
} else {
$self->log('info', "container data is on shared storage '$self->{storage}'");
}
my $conffile = PVE::OpenVZ::config_file($vmid);
my $newconffile = PVE::OpenVZ::config_file($vmid, $self->{node});
my $srccfgdir = dirname($conffile);
my $newcfgdir = dirname($newconffile);
# copy per-CT helper scripts (mount/umount/...) next to the new config
foreach my $s (PVE::OpenVZ::SCRIPT_EXT) {
my $scriptfn = "${vmid}.$s";
my $srcfn = "$srccfgdir/$scriptfn";
next if ! -f $srcfn;
my $dstfn = "$newcfgdir/$scriptfn";
copy($srcfn, $dstfn) || die "copy '$srcfn' to '$dstfn' failed - $!\n";
}
if ($self->{running}) {
# fixme: save state and quota
$self->log('info', "start live migration - suspending container");
$cmd = [ 'vzctl', '--skiplock', 'chkpnt', $vmid, '--suspend' ];
$self->cmd_quiet($cmd, errmsg => "Failed to suspend container");
$self->{undo_suspend} = 1;
$self->log('info', "dump container state");
$self->{dumpfile} = "$self->{dumpdir}/dump.$vmid";
$cmd = [ 'vzctl', '--skiplock', 'chkpnt', $vmid, '--dump', '--dumpfile', $self->{dumpfile} ];
$self->cmd_quiet($cmd, errmsg => "Failed to dump container state");
if (!$self->{shared}) {
$self->log('info', "copy dump file to target node");
$self->{undo_copy_dump} = 1;
$cmd = [ @{$self->{scp_cmd}}, $self->{dumpfile}, "root\@$self->{nodeip}:$self->{dumpfile}"];
$self->cmd_quiet($cmd, errmsg => "Failed to copy dump file");
# second pass transfers files changed while the CT was still running
$self->log('info', "starting rsync (2nd pass)");
my $basedir = dirname($privatedir);
$cmd = [ @{$self->{rsync_cmd}}, $privatedir, "root\@$self->{nodeip}:$basedir" ];
$self->cmd($cmd, errmsg => "Failed to sync container private area");
}
} else {
if (PVE::OpenVZ::check_mounted($conf, $vmid)) {
$self->log('info', "unmounting container");
$cmd = [ 'vzctl', '--skiplock', 'umount', $vmid ];
$self->cmd_quiet($cmd, errmsg => "Failed to umount container");
}
}
my $disk_quota = PVE::OpenVZ::get_disk_quota($conf);
if (!defined($disk_quota) || ($disk_quota != 0)) {
$disk_quota = $self->{disk_quota} = 1;
$self->log('info', "dump 2nd level quota");
$self->{quotadumpfile} = "$self->{dumpdir}/quotadump.$vmid";
$cmd = "vzdqdump $vmid -U -G -T > " . PVE::Tools::shellquote($self->{quotadumpfile});
$self->cmd_quiet($cmd, errmsg => "Failed to dump 2nd level quota");
if (!$self->{shared}) {
$self->log('info', "copy 2nd level quota to target node");
$self->{undo_copy_quota_dump} = 1;
$cmd = [@{$self->{scp_cmd}}, $self->{quotadumpfile},
"root\@$self->{nodeip}:$self->{quotadumpfile}"];
$self->cmd_quiet($cmd, errmsg => "Failed to copy 2nd level quota dump");
}
}
# everything copied - make sure the container is stopped
# fixme: do we need to start on the other node first?
if ($self->{running}) {
# past this point the suspend must not be undone any more
delete $self->{undo_suspend};
$cmd = [ 'vzctl', '--skiplock', 'chkpnt', $vmid, '--kill' ];
$self->cmd_quiet($cmd, errmsg => "Failed to kill container");
$cmd = [ 'vzctl', '--skiplock', 'umount', $vmid ];
sleep(1); # hack: wait - else there are open files
$self->cmd_quiet($cmd, errmsg => "Failed to umount container");
}
# move config
die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
if !rename($conffile, $newconffile);
}
# Roll back after a failed phase1: resume a suspended CT and remove any
# files (private area, helper scripts) already copied to the target node.
sub phase1_cleanup {
my ($self, $vmid, $err) = @_;
$self->log('info', "aborting phase 1 - cleanup resources");
my $conf = $self->{vmconf};
if ($self->{undo_suspend}) {
my $cmd = [ 'vzctl', '--skiplock', 'chkpnt', $vmid, '--resume' ];
$self->cmd_logerr($cmd, errmsg => "Failed to resume container");
}
if ($self->{undo_private}) {
$self->log('info', "removing copied files on target node");
my $cmd = [ @{$self->{rem_ssh}}, 'rm', '-rf', $self->{undo_private} ];
$self->cmd_logerr($cmd, errmsg => "Failed to remove copied files");
}
# fixme: that seem to be very dangerous and not needed
#my $cmd = [ @{$self->{rem_ssh}}, 'rm', '-rf', $self->{rootdir} ];
#eval { $self->cmd_quiet($cmd); };
my $newconffile = PVE::OpenVZ::config_file($vmid, $self->{node});
my $newcfgdir = dirname($newconffile);
# remove the helper scripts copied by phase1()
foreach my $s (PVE::OpenVZ::SCRIPT_EXT) {
my $scriptfn = "${vmid}.$s";
my $dstfn = "$newcfgdir/$scriptfn";
if (-f $dstfn) {
$self->log('err', "unlink '$dstfn' failed - $!") if !unlink $dstfn;
}
}
}
# Apply the CT config on the target node and re-initialize/reload disk
# quota there (quota is switched off again for offline migrations).
sub init_target_vm {
my ($self, $vmid) = @_;
my $conf = $self->{vmconf};
$self->log('info', "initialize container on remote node '$self->{node}'");
my $cmd = [ @{$self->{rem_ssh}}, 'vzctl', '--quiet', 'set', $vmid,
'--applyconfig_map', 'name', '--save' ];
$self->cmd_quiet($cmd, errmsg => "Failed to apply config on target node");
if ($self->{disk_quota}) {
$self->log('info', "initializing remote quota");
$cmd = [ @{$self->{rem_ssh}}, 'vzctl', 'quotainit', $vmid];
$self->cmd_quiet($cmd, errmsg => "Failed to initialize quota");
$self->log('info', "turn on remote quota");
$cmd = [ @{$self->{rem_ssh}}, 'vzctl', 'quotaon', $vmid];
$self->cmd_quiet($cmd, errmsg => "Failed to turn on quota");
$self->log('info', "load 2nd level quota");
$cmd = [ @{$self->{rem_ssh}}, "(vzdqload $vmid -U -G -T < " .
PVE::Tools::shellquote($self->{quotadumpfile}) .
" && vzquota reload2 $vmid)"];
$self->cmd_quiet($cmd, errmsg => "Failed to load 2nd level quota");
if (!$self->{running}) {
$self->log('info', "turn off remote quota");
$cmd = [ @{$self->{rem_ssh}}, 'vzquota', 'off', $vmid];
$self->cmd_quiet($cmd, errmsg => "Failed to turn off quota");
}
}
}
# Restore the state dump created in phase1() on the target node and resume
# the container there (online migration path - the dump file comes from
# 'vzctl chkpnt --dump').
sub phase2 {
my ($self, $vmid) = @_;
my $conf = $self->{vmconf};
$self->{target_initialized} = 1;
init_target_vm($self, $vmid);
$self->log('info', "starting container on remote node '$self->{node}'");
$self->log('info', "restore container state");
$self->{dumpfile} = "$self->{dumpdir}/dump.$vmid";
my $cmd = [ @{$self->{rem_ssh}}, 'vzctl', 'restore', $vmid, '--undump',
'--dumpfile', $self->{dumpfile}, '--skip_arpdetect' ];
$self->cmd_quiet($cmd, errmsg => "Failed to restore container");
$cmd = [ @{$self->{rem_ssh}}, 'vzctl', 'restore', $vmid, '--resume' ];
$self->cmd_quiet($cmd, errmsg => "Failed to resume container");
}
# Ensure the target container got initialized even when phase2 did not run.
sub phase3 {
my ($self, $vmid) = @_;
if (!$self->{target_initialized}) {
init_target_vm($self, $vmid);
}
}
# After a successful migration: remove local container data (unless on
# shared storage) and drop the local quota accounting.
sub phase3_cleanup {
my ($self, $vmid, $err) = @_;
my $conf = $self->{vmconf};
if (!$self->{shared}) {
# destroy local container data
$self->log('info', "removing container files on local node");
my $cmd = [ 'rm', '-rf', $self->{privatedir} ];
$self->cmd_logerr($cmd);
}
if ($self->{disk_quota}) {
my $cmd = [ 'vzquota', 'drop', $vmid];
$self->cmd_logerr($cmd, errmsg => "Failed to drop local quota");
}
}
# Always-run cleanup: delete the temporary dump/quota files locally and,
# if they were copied, on the target node as well.
sub final_cleanup {
my ($self, $vmid) = @_;
$self->log('info', "start final cleanup");
my $conf = $self->{vmconf};
unlink($self->{quotadumpfile}) if $self->{quotadumpfile};
unlink($self->{dumpfile}) if $self->{dumpfile};
if ($self->{undo_copy_dump} && $self->{dumpfile}) {
my $cmd = [ @{$self->{rem_ssh}}, 'rm', '-f', $self->{dumpfile} ];
$self->cmd_logerr($cmd, errmsg => "Failed to remove dump file");
}
if ($self->{undo_copy_quota_dump} && $self->{quotadumpfile}) {
my $cmd = [ @{$self->{rem_ssh}}, 'rm', '-f', $self->{quotadumpfile} ];
$self->cmd_logerr($cmd, errmsg => "Failed to remove 2nd level quota dump file");
}
}
1;

View File

@ -13,7 +13,6 @@ use File::Path;
use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::Cluster qw(cfs_read_file);
use PVE::VZDump::OpenVZ;
use Time::localtime;
use Time::Local;
use PVE::JSONSchema qw(get_standard_option);
@ -26,7 +25,7 @@ my $pidfile = '/var/run/vzdump.pid';
my $logdir = '/var/log/vzdump';
my @plugins = qw (PVE::VZDump::OpenVZ);
my @plugins = qw();
# Load available plugins
my $pveplug = "/usr/share/perl5/PVE/VZDump/QemuServer.pm";
@ -850,29 +849,12 @@ sub exec_backup_task {
$self->run_hook_script ('backup-start', $task, $logfd);
if ($vmtype eq 'openvz') {
# pre-suspend rsync
$plugin->copy_data_phase1 ($task, $vmid);
}
debugmsg ('info', "suspend vm", $logfd);
$vmstoptime = time ();
$self->run_hook_script ('pre-stop', $task, $logfd);
$plugin->suspend_vm ($task, $vmid);
$cleanup->{resume} = 1;
if ($vmtype eq 'openvz') {
# post-suspend rsync
$plugin->copy_data_phase2 ($task, $vmid);
debugmsg ('info', "resume vm", $logfd);
$cleanup->{resume} = 0;
$self->run_hook_script ('pre-restart', $task, $logfd);
$plugin->resume_vm ($task, $vmid);
my $delay = time () - $vmstoptime;
debugmsg ('info', "vm is online again after $delay seconds", $logfd);
}
} elsif ($mode eq 'snapshot') {
$self->run_hook_script ('backup-start', $task, $logfd);

View File

@ -1,7 +1,6 @@
include ../../defines.mk
PERLSOURCE = \
OpenVZ.pm \
Plugin.pm
all:

View File

@ -1,312 +0,0 @@
# PVE::VZDump::OpenVZ - vzdump backup plugin for OpenVZ containers.
# Implements the PVE::VZDump::Plugin interface: prepare/lock/stop/start/
# suspend/resume, LVM snapshot handling, data copy and archive creation.
package PVE::VZDump::OpenVZ;
use strict;
use warnings;
use File::Path;
use File::Basename;
use PVE::INotify;
use PVE::VZDump;
use PVE::OpenVZ;
use base qw (PVE::VZDump::Plugin);
# Load the CT config and cache its private directory and hostname in
# $self->{vmlist}->{$vmid}.
my $load_vz_conf = sub {
my ($self, $vmid) = @_;
my $conf = PVE::OpenVZ::load_config($vmid);
my $dir = $self->{privatedir};
if ($conf->{ve_private} && $conf->{ve_private}->{value}) {
$dir = $conf->{ve_private}->{value};
}
# expand the OpenVZ $VEID placeholder in the configured path
$dir =~ s/\$VEID/$vmid/;
$self->{vmlist}->{$vmid}->{dir} = $dir;
my $hostname = "CT $vmid";
if ($conf->{hostname} && $conf->{hostname}->{value}) {
$hostname = $conf->{hostname}->{value};
}
$self->{vmlist}->{$vmid}->{hostname} = $hostname;
};
# rsync $from to $to, honoring the configured bandwidth limit;
# $text is only used in the log messages ("first"/"final").
my $rsync_vm = sub {
my ($self, $task, $from, $to, $text) = @_;
$self->loginfo ("starting $text sync $from to $to");
my $starttime = time();
my $opts = $self->{vzdump}->{opts};
my $rsyncopts = "--stats -x --numeric-ids";
$rsyncopts .= " --bwlimit=$opts->{bwlimit}" if $opts->{bwlimit};
$self->cmd ("rsync $rsyncopts -aH --delete --no-whole-file --inplace '$from' '$to'");
my $delay = time () - $starttime;
$self->loginfo ("$text sync finished ($delay seconds)");
};
# Constructor: verifies vzctl is installed and caches the global vz config
# and container list. Note: one-arg bless defaults to the current package,
# which only works as long as this plugin is never subclassed.
sub new {
my ($class, $vzdump) = @_;
PVE::VZDump::check_bin ('vzctl');
my $self = bless PVE::OpenVZ::read_global_vz_config ();
$self->{vzdump} = $vzdump;
$self->{vmlist} = PVE::OpenVZ::config_list();
return $self;
};
# Plugin type tag used by vzdump to dispatch to this plugin.
sub type {
return 'openvz';
}
# Query 'vzctl status'; returns the running flag (plus a human-readable
# status string in list context).
sub vm_status {
my ($self, $vmid) = @_;
my $status_text = '';
$self->cmd ("vzctl status $vmid", outfunc => sub {$status_text .= shift; });
chomp $status_text;
my $running = $status_text =~ m/running/ ? 1 : 0;
return wantarray ? ($running, $running ? 'running' : 'stopped') : $running;
}
# Fill $task->{diskinfo}/{snapdir} depending on the backup mode:
# 'snapshot' resolves the LVM device backing the private dir, 'suspend'
# copies into tmpdir, otherwise the private dir is archived directly.
sub prepare {
my ($self, $task, $vmid, $mode) = @_;
$self->$load_vz_conf ($vmid);
my $dir = $self->{vmlist}->{$vmid}->{dir};
my $diskinfo = { dir => $dir };
$task->{hostname} = $self->{vmlist}->{$vmid}->{hostname};
$task->{diskinfo} = $diskinfo;
my $hostname = PVE::INotify::nodename();
if ($mode eq 'snapshot') {
my $lvmmap = PVE::VZDump::get_lvm_mapping();
my ($srcdev, $lvmpath, $lvmvg, $lvmlv, $fstype) =
PVE::VZDump::get_lvm_device ($dir, $lvmmap);
my $targetdev = PVE::VZDump::get_lvm_device ($task->{dumpdir}, $lvmmap);
die ("mode failure - unable to detect lvm volume group\n") if !$lvmvg;
die ("mode failure - wrong lvm mount point '$lvmpath'\n") if $dir !~ m|/?$lvmpath/?|;
# dumping into the snapshotted device itself would fill the snapshot
die ("mode failure - unable to dump into snapshot (use option --dumpdir)\n")
if $targetdev eq $srcdev;
$diskinfo->{snapname} = "vzsnap-$hostname-0";
$diskinfo->{snapdev} = "/dev/$lvmvg/$diskinfo->{snapname}";
$diskinfo->{srcdev} = $srcdev;
$diskinfo->{lvmvg} = $lvmvg;
$diskinfo->{lvmlv} = $lvmlv;
$diskinfo->{fstype} = $fstype;
$diskinfo->{lvmpath} = $lvmpath;
$diskinfo->{mountpoint} = "/mnt/vzsnap0";
$task->{snapdir} = $dir;
$task->{snapdir} =~ s|/?$lvmpath/?|$diskinfo->{mountpoint}/|;
} elsif ($mode eq 'suspend') {
$task->{snapdir} = $task->{tmpdir};
} else {
$task->{snapdir} = $dir;
}
}
# Acquire the per-container backup lock so concurrent vzdump runs cannot
# operate on the same CT at once. Dies if the lock cannot be obtained;
# the lock handle is stored in $self->{lock} for unlock_vm().
sub lock_vm {
my ($self, $vmid) = @_;
# bug fix: the lock file name was hard-coded to "103.lck", so backups of
# *different* containers all contended for one lock while two backups of
# the same CT (other than 103) never conflicted - derive it from $vmid.
my $filename = "$self->{lockdir}/$vmid.lck";
my $lockmgr = PVE::OpenVZ::create_lock_manager();
$self->{lock} = $lockmgr->lock($filename) || die "can't lock VM $vmid\n";
}
# Release the lock taken in lock_vm().
sub unlock_vm {
my ($self, $vmid) = @_;
$self->{lock}->release();
}
# First rsync pass (container still running) into the snapshot/tmp dir.
sub copy_data_phase1 {
my ($self, $task) = @_;
$self->$rsync_vm ($task, "$task->{diskinfo}->{dir}/", $task->{snapdir}, "first");
}
# we use --skiplock for vzctl because we have already locked the VM
# by calling lock_vm()
sub stop_vm {
my ($self, $task, $vmid) = @_;
$self->cmd ("vzctl --skiplock stop $vmid");
}
sub start_vm {
my ($self, $task, $vmid) = @_;
$self->cmd ("vzctl --skiplock start $vmid");
}
# Checkpoint-suspend the container for the final copy pass.
sub suspend_vm {
my ($self, $task, $vmid) = @_;
$self->cmd ("vzctl --skiplock chkpnt $vmid --suspend");
}
# Create and mount an LVM snapshot of the device backing the container,
# removing a stale snapshot left over from a previous run first.
sub snapshot {
my ($self, $task) = @_;
my $opts = $self->{vzdump}->{opts};
my $di = $task->{diskinfo};
mkpath $di->{mountpoint}; # create mount point for lvm snapshot
if (-b $di->{snapdev}) {
$self->loginfo ("trying to remove stale snapshot '$di->{snapdev}'");
$self->cmd_noerr ("umount $di->{mountpoint}");
$self->cmd_noerr ("lvremove -f $di->{snapdev}");
}
$self->loginfo ("creating lvm snapshot of $di->{srcdev} ('$di->{snapdev}')");
$task->{cleanup}->{lvm_snapshot} = 1;
$self->cmd ("lvcreate --size $opts->{size}M --snapshot" .
" --name $di->{snapname} /dev/$di->{lvmvg}/$di->{lvmlv}");
# xfs refuses to mount a snapshot with a duplicate UUID, hence nouuid
my $mopts = $di->{fstype} eq 'xfs' ? "-o nouuid" : '';
$task->{cleanup}->{snapshot_mount} = 1;
$self->cmd ("mount -n -t $di->{fstype} $mopts $di->{snapdev} $di->{mountpoint}");
}
# Second rsync pass (container suspended) to capture late changes.
sub copy_data_phase2 {
my ($self, $task) = @_;
$self->$rsync_vm ($task, "$task->{diskinfo}->{dir}/", $task->{snapdir}, "final");
}
# Resume a checkpoint-suspended container.
sub resume_vm {
my ($self, $task, $vmid) = @_;
$self->cmd ("vzctl --skiplock chkpnt $vmid --resume");
}
# Copy the CT config and helper scripts into <snapdir>/etc/vzdump so they
# are included inside the archive (restored later as vps.conf etc.).
sub assemble {
my ($self, $task, $vmid) = @_;
my $conffile = PVE::OpenVZ::config_file($vmid);
my $dir = $task->{snapdir};
$task->{cleanup}->{etc_vzdump} = 1;
mkpath "$dir/etc/vzdump/";
$self->cmd ("cp '$conffile' '$dir/etc/vzdump/vps.conf'");
my $cfgdir = dirname ($conffile);
foreach my $s (PVE::OpenVZ::SCRIPT_EXT) {
my $fn = "$cfgdir/$vmid.$s";
$self->cmd ("cp '$fn' '$dir/etc/vzdump/vps.$s'") if -f $fn;
}
}
# Build and run the find|tar shell pipeline that writes the archive,
# applying exclude filters, an optional cstream bandwidth limit and an
# optional compressor. Writes to $opts->{stdout} if given, else $filename.
sub archive {
my ($self, $task, $vmid, $filename, $comp) = @_;
my $findexcl = $self->{vzdump}->{findexcl};
my $findargs = join (' ', @$findexcl) . ' -print0';
my $opts = $self->{vzdump}->{opts};
my $srcdir = $self->{vmlist}->{$vmid}->{dir};
my $snapdir = $task->{snapdir};
my $taropts = "--totals --sparse --numeric-owner --no-recursion --one-file-system";
# note: --remove-files does not work because we do not
# backup all files (filters). tar complains:
# Cannot rmdir: Directory not empty
# so we disable this optimization for now
#if ($snapdir eq $task->{tmpdir} && $snapdir =~ m|^$opts->{dumpdir}/|) {
# $taropts .= " --remove-files"; # try to save space
#}
my $cmd = "(";
# escape backslashes in file names for tar's -T list
$cmd .= "cd $snapdir;find . $findargs|sed 's/\\\\/\\\\\\\\/g'|";
$cmd .= "tar cpf - $taropts --null -T -";
my $bwl = $opts->{bwlimit}*1024; # bandwidth limit for cstream
$cmd .= "|cstream -t $bwl" if $opts->{bwlimit};
$cmd .= "|$comp" if $comp;
$cmd .= ")";
if ($opts->{stdout}) {
$self->cmd ($cmd, output => ">&=" . fileno($opts->{stdout}));
} else {
$self->cmd ("$cmd >$filename");
}
}
# Undo whatever earlier steps flagged in $task->{cleanup}: unmount the
# snapshot, remove the LVM snapshot (with retries), delete copied configs.
sub cleanup {
my ($self, $task, $vmid) = @_;
my $di = $task->{diskinfo};
if ($task->{cleanup}->{snapshot_mount}) {
# Note: sleep to avoid 'device is busy' message.
# Seems Kernel need some time to cleanup open file list,
# for example when we stop the tar with kill (stop task)
sleep(1);
$self->cmd_noerr ("umount $di->{mountpoint}");
}
if ($task->{cleanup}->{lvm_snapshot}) {
# loop, because we often get 'LV in use: not deactivating'
# we use run_command() because we do not want to log errors here
my $wait = 1;
while(-b $di->{snapdev}) {
eval {
my $cmd = ['lvremove', '-f', $di->{snapdev}];
PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {});
};
last if !$@;
if ($wait >= 64) {
$self->logerr($@);
last;
}
$self->loginfo("lvremove failed - trying again in $wait seconds") if $wait >= 8;
sleep($wait);
$wait = $wait*2;
}
}
if ($task->{cleanup}->{etc_vzdump}) {
my $dir = "$task->{snapdir}/etc/vzdump";
eval { rmtree $dir if -d $dir; };
$self->logerr ($@) if $@;
}
}
1;

View File

@ -5,12 +5,10 @@ SUBDIRS = init.d cron ocf test
SCRIPTS = \
pveceph \
vzdump \
vzrestore \
pvestatd \
pvesh \
pveam \
pvebanner \
pvectl \
pvedaemon \
pveproxy \
spiceproxy \
@ -22,9 +20,7 @@ SCRIPTS = \
MANS = \
pveceph.1 \
pvectl.1 \
vzdump.1 \
vzrestore.1 \
pvestatd.1 \
pvedaemon.1 \
pveproxy.1 \
@ -82,10 +78,8 @@ install: ${SCRIPTS} ${MANS} pvemailforward
install -d ${MAN1DIR}
install -m 0644 ${MANS} ${MAN1DIR}
install -d ${PODDIR}
install -m 0644 pvectl.1.pod ${PODDIR}
install -m 0644 vzdump.1.pod ${PODDIR}
install -m 0644 pvesubscription.1.pod ${PODDIR}
install -m 0644 vzrestore.1.pod ${PODDIR}
set -e && for i in ${SUBDIRS}; do ${MAKE} -C $$i $@; done
.PHONY: distclean

View File

@ -1,106 +0,0 @@
#!/usr/bin/perl
# pvectl - command line wrapper around the PVE::API2::OpenVZ API methods
# to manage OpenVZ containers (list/create/destroy/start/stop/migrate/...).
use strict;
use warnings;
use PVE::Tools qw(extract_param);
use PVE::Cluster qw(cfs_register_file cfs_read_file);
use PVE::SafeSyslog;
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::CLIHandler;
use PVE::API2::OpenVZ;
use Data::Dumper; # fixme: remove
use base qw(PVE::CLIHandler);
$ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
initlog('pvectl');
die "please run as root\n" if $> != 0;
PVE::INotify::inotify_init();
my $nodename = PVE::INotify::nodename();
# run all API calls as root@pam through a CLI RPC environment
my $rpcenv = PVE::RPCEnvironment->init('cli');
$rpcenv->init_request();
$rpcenv->set_language($ENV{LANG});
$rpcenv->set_user('root@pam');
# wait for the returned worker task and exit with its status
my $upid_exit = sub {
my $upid = shift;
my $status = PVE::Tools::upid_read_status($upid);
exit($status eq 'OK' ? 0 : -1);
};
# command table: name => [ API class, method, positional args, defaults, output fn ]
my $cmddef = {
list => [ "PVE::API2::OpenVZ", 'vmlist', [],
{ node => $nodename }, sub {
my $vmlist = shift;
exit 0 if (!scalar(@$vmlist));
printf "%10s %-20s %-10s %-10s %-12s\n",
qw(VMID NAME STATUS MEM(MB) DISK(GB));
foreach my $rec (sort { $a->{vmid} <=> $b->{vmid} } @$vmlist) {
printf "%10s %-20s %-10s %-10s %-12.2f\n", $rec->{vmid}, $rec->{name} || '',
$rec->{status},
($rec->{maxmem} || 0)/(1024*1024),
($rec->{maxdisk} || 0)/(1024*1024*1024);
}
} ],
create => [ 'PVE::API2::OpenVZ', 'create_vm', ['vmid', 'ostemplate'], { node => $nodename }, $upid_exit ],
destroy => [ 'PVE::API2::OpenVZ', 'destroy_vm', ['vmid'], { node => $nodename }, $upid_exit ],
set => [ "PVE::API2::OpenVZ", 'update_vm', ['vmid'], { node => $nodename } ],
config => [ "PVE::API2::OpenVZ", 'vm_config', ['vmid'],
{ node => $nodename }, sub {
my $config = shift;
foreach my $k (sort (keys %$config)) {
next if $k eq 'digest';
my $v = $config->{$k};
if ($k eq 'description') {
$v = PVE::Tools::encode_text($v);
}
print "$k: $v\n";
}
}],
start => [ 'PVE::API2::OpenVZ', 'vm_start', ['vmid'], { node => $nodename }, $upid_exit],
suspend => [ 'PVE::API2::OpenVZ', 'vm_suspend', ['vmid'], { node => $nodename }, $upid_exit],
resume => [ 'PVE::API2::OpenVZ', 'vm_resume', ['vmid'], { node => $nodename }, $upid_exit],
shutdown => [ 'PVE::API2::OpenVZ', 'vm_shutdown', ['vmid'], { node => $nodename }, $upid_exit],
stop => [ 'PVE::API2::OpenVZ', 'vm_stop', ['vmid'], { node => $nodename }, $upid_exit],
mount => [ 'PVE::API2::OpenVZ', 'vm_mount', ['vmid'], { node => $nodename }, $upid_exit],
umount => [ 'PVE::API2::OpenVZ', 'vm_umount', ['vmid'], { node => $nodename }, $upid_exit],
migrate => [ "PVE::API2::OpenVZ", 'migrate_vm', ['vmid', 'target'], { node => $nodename }, $upid_exit],
};
my $cmd = shift;
PVE::CLIHandler::handle_cmd($cmddef, "pvectl", $cmd, \@ARGV, undef, $0);
exit 0;
__END__
=head1 NAME
pvectl - vzctl wrapper to manage OpenVZ containers
=head1 SYNOPSIS
=include synopsis
=head1 DESCRIPTION
This is a small wrapper around vzctl.
=include pve_copyright

View File

@ -13,7 +13,6 @@ use PVE::INotify;
use PVE::Cluster qw(cfs_read_file);
use PVE::Storage;
use PVE::QemuServer;
use PVE::OpenVZ;
use PVE::RPCEnvironment;
use PVE::API2::Subscription;
use PVE::AutoBalloon;
@ -202,45 +201,6 @@ sub find_vzctl_console_pids {
return $res;
}
sub remove_stale_openvz_consoles {
my $vmstatus = PVE::OpenVZ::vmstatus();
my $pidhash = find_vzctl_console_pids();
foreach my $vmid (keys %$pidhash) {
next if defined($vmstatus->{$vmid});
syslog('info', "remove stale vzctl console for CT $vmid");
foreach my $pid (@{$pidhash->{$vmid}}) {
kill(9, $pid);
}
}
}
sub update_openvz_status {
my $ctime = time();
my $vmstatus = PVE::OpenVZ::vmstatus();
foreach my $vmid (keys %$vmstatus) {
my $d = $vmstatus->{$vmid};
my $data;
if ($d->{status} eq 'running') { # running
$data = "$d->{uptime}:$d->{name}:$d->{status}:0:$ctime:$d->{cpus}:$d->{cpu}:" .
"$d->{maxmem}:$d->{mem}:" .
"$d->{maxdisk}:$d->{disk}:" .
"$d->{netin}:$d->{netout}:" .
"$d->{diskread}:$d->{diskwrite}";
} else {
$data = "0:$d->{name}:$d->{status}:0:$ctime:$d->{cpus}::" .
"$d->{maxmem}::" .
"$d->{maxdisk}:$d->{disk}:" .
":::";
}
PVE::Cluster::broadcast_rrd("pve2.3-vm/$vmid", $data);
}
}
sub update_storage_status {
my $cfg = cfs_read_file("storage.cfg");
@ -287,23 +247,11 @@ sub update_status {
$err = $@;
syslog('err', "qemu status update error: $err") if $err;
eval {
update_openvz_status();
};
$err = $@;
syslog('err', "openvz status update error: $err") if $err;
eval {
update_storage_status();
};
$err = $@;
syslog('err', "storage status update error: $err") if $err;
eval {
remove_stale_openvz_consoles();
};
$err = $@;
syslog('err', "openvz console cleanup error: $err") if $err;
}
my $next_update = 0;

View File

@ -1,102 +0,0 @@
#!/usr/bin/perl -w
# vzrestore - restore an OpenVZ container from a vzdump backup archive.
# Thin CLI wrapper: registers one API method that forwards to
# PVE::API2::OpenVZ::create_vm with restore=1.
use strict;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param);
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::CLIHandler;
use PVE::JSONSchema qw(get_standard_option);
use PVE::API2::OpenVZ;
use Data::Dumper; # fixme: remove
use base qw(PVE::CLIHandler);
$ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
initlog('vzrestore');
die "please run as root\n" if $> != 0;
PVE::INotify::inotify_init();
# run as root@pam through a CLI RPC environment
my $rpcenv = PVE::RPCEnvironment->init('cli');
$rpcenv->init_request();
$rpcenv->set_language($ENV{LANG});
$rpcenv->set_user('root@pam');
__PACKAGE__->register_method({
name => 'vzrestore',
path => 'vzrestore',
method => 'POST',
description => "Restore OpenVZ containers.",
parameters => {
additionalProperties => 0,
properties => {
vmid => get_standard_option('pve-vmid'),
archive => {
description => "The backup file. You can pass '-' to read from standard input.",
type => 'string',
maxLength => 255,
},
storage => get_standard_option('pve-storage-id', {
description => "Target storage.",
default => 'local',
optional => 1,
}),
force => {
optional => 1,
type => 'boolean',
description => "Allow to overwrite existing container.",
},
},
},
returns => {
type => 'string',
},
code => sub {
my ($param) = @_;
# create_vm() takes the archive via its 'ostemplate' parameter when
# 'restore' is set (see PVE::API2::OpenVZ)
$param->{ostemplate} = extract_param($param, 'archive');
$param->{node} = PVE::INotify::nodename();
$param->{restore} = 1;
return PVE::API2::OpenVZ->create_vm($param);
}});
# single command definition: wait for the worker and exit with its status
my $cmddef = [ __PACKAGE__, 'vzrestore', ['archive', 'vmid'], undef,
sub {
my $upid = shift;
my $status = PVE::Tools::upid_read_status($upid);
exit($status eq 'OK' ? 0 : -1);
}];
push @ARGV, 'help' if !scalar(@ARGV);
PVE::CLIHandler::handle_simple_cmd($cmddef, \@ARGV, undef, $0);
exit 0;
__END__
=head1 NAME
vzrestore - restore OpenVZ vzdump backups
=head1 SYNOPSIS
=include synopsis
=head1 DESCRIPTION
Restores OpenVZ vzdump backups.
=head1 SEE ALSO
vzdump(1) qmrestore(1)
=include pve_copyright