2011-10-12 08:25:18 +02:00
package PVE::VZDump ;
use strict ;
use warnings ;
2020-02-10 11:11:21 +01:00
2011-10-12 08:25:18 +02:00
use Fcntl ':flock' ;
2020-02-10 11:11:21 +01:00
use File::Path ;
2011-10-12 08:25:18 +02:00
use IO::File ;
use IO::Select ;
use IPC::Open3 ;
2019-04-16 14:08:54 +00:00
use POSIX qw( strftime ) ;
2011-10-12 08:25:18 +02:00
use Time::Local ;
2020-02-10 11:11:21 +01:00
use PVE::Cluster qw( cfs_read_file ) ;
use PVE::DataCenterConfig ;
use PVE::Exception qw( raise_param_exc ) ;
2015-09-18 11:21:03 +02:00
use PVE::HA::Config ;
2020-02-10 11:11:21 +01:00
use PVE::HA::Env::PVE2 ;
use PVE::JSONSchema qw( get_standard_option ) ;
use PVE::RPCEnvironment ;
use PVE::Storage ;
2019-10-15 13:00:22 +02:00
use PVE::VZDump::Common ;
2020-02-10 11:11:21 +01:00
use PVE::VZDump::Plugin ;
2020-06-17 15:45:21 +02:00
use PVE::Tools qw( extract_param split_list ) ;
2020-06-08 15:00:34 +02:00
use PVE::API2Tools ;
2011-10-12 08:25:18 +02:00
# Filesystems considered POSIX-enough to host temporary dump files directly
# (consulted when picking a tmpdir in exec_backup_task).
my @posix_filesystems = qw(ext3 ext4 nfs nfs4 reiserfs xfs);

# Global lock file serializing concurrent vzdump runs on this node (see getlock()).
my $lockfile = '/var/run/vzdump.lock';

# Holds the UPID of the currently running vzdump task (written in getlock()).
my $pidfile = '/var/run/vzdump.pid';

# Per-guest task log files are written below this directory.
my $logdir = '/var/log/vzdump';

# Filled by the plugin loader below with every successfully loaded plugin class.
my @plugins = qw();

# Shared vzdump option schema (option names, types and defaults).
my $confdesc = PVE::VZDump::Common::get_confdesc();
2015-07-27 13:14:30 +02:00
2011-10-12 08:25:18 +02:00
# Load available plugins
# Each candidate class is loaded dynamically; a plugin is only registered in
# @plugins when its module file exists on disk, so a missing guest-type
# package simply disables backups for that guest type instead of failing.
my @pve_vzdump_classes = qw(PVE::VZDump::QemuServer PVE::VZDump::LXC);
foreach my $plug (@pve_vzdump_classes) {
    # build the expected file path, then turn '::' into '/' so the full
    # "/usr/share/perl5/PVE/VZDump/....pm" path results
    my $filename = "/usr/share/perl5/$plug.pm";
    $filename =~ s!::!/!g;
    if (-f $filename) {
	eval { require $filename; };
	if (!$@) {
	    $plug->import();
	    push @plugins, $plug;
	} else {
	    # the module exists but failed to compile - that is a real error
	    die $@;
	}
    }
}
# helper functions
# Log a message of type $mtype ('info', 'warn', 'err') to the task log
# ($logfd) and optionally syslog. All arguments are forwarded unchanged to
# the plugin-level logger; the unpacking here only documents the signature.
sub debugmsg {
    my ($mtype, $msg, $logfd, $syslog) = @_;

    PVE::VZDump::Plugin::debugmsg(@_);
}
# Run an external command, mirroring every output line into our task log.
# $logfd is the log handle passed through to debugmsg(); remaining %param
# entries are forwarded to PVE::Tools::run_command unchanged.
sub run_command {
    my ($logfd, $cmdstr, %param) = @_;

    my $log_line = sub { debugmsg('info', shift, $logfd) };

    PVE::Tools::run_command($cmdstr, %param, logfunc => $log_line);
}
2020-12-01 09:24:20 +01:00
# Normalize retention options in-place on $param: the legacy 'maxfiles'
# setting is translated into the newer 'prune-backups' hash, and a
# 'prune-backups' property string is parsed into its hash form.
# $kind only flavors the warning message when both options are present
# (in which case 'maxfiles' is dropped).
my $parse_prune_backups_maxfiles = sub {
    my ($param, $kind) = @_;

    my $maxfiles = delete $param->{maxfiles};
    my $prune_backups = $param->{'prune-backups'};

    if (defined($maxfiles) && defined($prune_backups)) {
	debugmsg('warn', "both 'maxfiles' and 'prune-backups' defined as ${kind} - ignoring 'maxfiles'");
    }

    if (defined($prune_backups)) {
	return if ref($prune_backups) eq 'HASH'; # already parsed
	$param->{'prune-backups'} =
	    PVE::JSONSchema::parse_property_string('prune-backups', $prune_backups);
    } elsif (defined($maxfiles)) {
	# historic semantics: maxfiles == 0 meant "keep everything"
	$param->{'prune-backups'} = $maxfiles
	    ? { 'keep-last' => $maxfiles }
	    : { 'keep-all' => 1 };
    }
};
2011-10-12 08:25:18 +02:00
# Collect backup-relevant information about a storage: its section config
# ('scfg'), parsed retention settings ('prune-backups'), and either a PBS
# marker ('pbs') or the directory backups are dumped to ('dumpdir').
# Dies when the storage type or its content types cannot hold backups.
sub storage_info {
    my $storage = shift;

    my $cfg = PVE::Storage::config();
    my $scfg = PVE::Storage::storage_config($cfg, $storage);
    my $type = $scfg->{type};

    my %backup_capable = map { $_ => 1 } qw(dir nfs glusterfs cifs cephfs pbs);
    die "can't use storage type '$type' for backup\n" if !$backup_capable{$type};

    die "can't use storage '$storage' for backups - wrong content type\n"
	if !$scfg->{content}->{backup};

    my $info = { scfg => $scfg };

    if (defined($scfg->{'prune-backups'})) {
	$info->{'prune-backups'} =
	    PVE::JSONSchema::parse_property_string('prune-backups', $scfg->{'prune-backups'});
    }

    if ($type eq 'pbs') {
	$info->{pbs} = 1;
    } else {
	$info->{dumpdir} = PVE::Storage::get_backup_dir($cfg, $storage);
    }

    return $info;
}
# Render a byte count as a human-readable string: KB and MB as truncated
# integers, GB and TB with two decimal places.
sub format_size {
    my ($bytes) = @_;

    my $kb = $bytes / 1024;
    return int($kb) . "KB" if $kb < 1024;

    my $mb = $kb / 1024;
    return int($mb) . "MB" if $mb < 1024;

    my $gb = $mb / 1024;
    return sprintf("%.2fGB", $gb) if $gb < 1024;

    return sprintf("%.2fTB", $gb / 1024);
}
# Format a duration given in seconds as "HH:MM:SS" (the hour field may
# exceed two digits for very long runs).
sub format_time {
    my ($total) = @_;

    my $hours = int($total / 3600);
    my $remainder = $total - $hours * 3600;
    my $minutes = int($remainder / 60);
    my $secs = $remainder - $minutes * 60;

    return sprintf("%02d:%02d:%02d", $hours, $minutes, $secs);
}
# Break the first 990 characters of each line with a newline so mail lines
# stay below common MTA line-length limits (RFC 5321's 1000-octet cap).
sub encode8bit {
    my ($str) = @_;

    return $str =~ s/^(.{990})/$1\n/mgr; # reduce line length
}
# Escape the characters that are special in HTML so arbitrary text can be
# embedded safely in the HTML mail body.
# BUG FIX: the previous substitutions replaced each character with itself
# (the entity names had been lost), so nothing was actually escaped.
# '&' must be handled first, otherwise the '&' introduced by the other
# replacements would be escaped a second time.
sub escape_html {
    my ($str) = @_;

    $str =~ s/&/&amp;/g;
    $str =~ s/</&lt;/g;
    $str =~ s/>/&gt;/g;

    return $str;
}
# Search every directory in $PATH for an executable named $bin.
# Returns its full path; dies when the command cannot be found.
sub check_bin {
    my ($bin) = @_;

    for my $dir (split(/:/, $ENV{PATH})) {
	my $candidate = "$dir/$bin";
	return $candidate if -x $candidate;
    }

    die "unable to find command '$bin'\n";
}
# Validate and normalize a list of VM IDs: dies on non-numeric input,
# strips leading zeros, drops ID 0, and returns the remaining IDs as a
# numerically sorted array reference.
sub check_vmids {
    my @vmids = @_;

    my $valid = [];
    for my $vmid (sort { $a <=> $b } @vmids) {
	die "ERROR: strange VM ID '${vmid}'\n" if $vmid !~ m/^\d+$/;
	my $id = int($vmid); # remove leading zeros
	push @$valid, $id if $id;
    }

    return $valid;
}
# Read /etc/vzdump.conf and merge it over the schema defaults.
# Returns a hash ref of effective default options; a missing or unreadable
# config file is not an error (the bare schema defaults are returned).
sub read_vzdump_defaults {
    my $fn = "/etc/vzdump.conf";

    # start with every default declared in the option schema
    my $defaults = {
	map {
	    my $default = $confdesc->{$_}->{default};
	    defined($default) ? ($_ => $default) : ()
	} keys %$confdesc
    };
    $parse_prune_backups_maxfiles->($defaults, "defaults in VZDump schema");

    my $raw;
    eval { $raw = PVE::Tools::file_get_contents($fn); };
    return $defaults if $@; # no config file - schema defaults only

    my $conf_schema = { type => 'object', properties => $confdesc, };
    my $res = PVE::JSONSchema::parse_config($conf_schema, $fn, $raw);

    # 'exclude-path' is written as a shell-style argument list in the file
    if (my $excludes = $res->{'exclude-path'}) {
	$res->{'exclude-path'} = PVE::Tools::split_args($excludes);
    }
    # 'mailto' becomes an array ref of addresses
    if (defined($res->{mailto})) {
	my @mailto = split_list($res->{mailto});
	$res->{mailto} = [ @mailto ];
    }
    $parse_prune_backups_maxfiles->($res, "options in '$fn'");

    # fill in any option the config file did not set explicitly
    foreach my $key (keys %$defaults) {
	$res->{$key} = $defaults->{$key} if !defined($res->{$key});
    }

    # 'storage' wins over 'dumpdir' when both are configured
    if (defined($res->{storage}) && defined($res->{dumpdir})) {
	debugmsg('warn', "both 'storage' and 'dumpdir' defined in '$fn' - ignoring 'dumpdir'");
	delete $res->{dumpdir};
    }

    return $res;
}
2019-05-21 13:16:13 +02:00
# Upper bound (1 MiB) for the generated status mail body; when the detailed
# logs would push the mail past this size they are replaced by a pointer to
# the task history.
use constant MAX_MAIL_SIZE => 1024 * 1024;
# Assemble and send the backup status notification mail (plain-text and
# HTML alternative). $tasklist is the list of per-guest task hashes,
# $totaltime the overall duration in seconds, $err an optional job-level
# error, and $detail_pre/$detail_post optional extra log sections.
# Honors the 'mailnotification' option ('always' vs 'failure').
sub sendmail {
    my ($self, $tasklist, $totaltime, $err, $detail_pre, $detail_post) = @_;

    my $opts = $self->{opts};

    my $mailto = $opts->{mailto};

    # nothing to do without recipients
    return if !($mailto && scalar(@$mailto));

    my $cmdline = $self->{cmdline};

    # count failures and normalize task fields so the templates below can
    # rely on them being set
    my $ecount = 0;
    foreach my $task (@$tasklist) {
	$ecount++ if $task->{state} ne 'ok';
	chomp $task->{msg} if $task->{msg};
	$task->{backuptime} = 0 if !$task->{backuptime};
	$task->{size} = 0 if !$task->{size};
	$task->{target} = 'unknown' if !$task->{target};
	$task->{hostname} = "VM $task->{vmid}" if !$task->{hostname};
	if ($task->{state} eq 'todo') {
	    $task->{msg} = 'aborted';
	}
    }

    # in 'failure' mode only mail when something actually went wrong
    my $notify = $opts->{mailnotification} || 'always';
    return if (!$ecount && !$err && ($notify eq 'failure'));

    my $stat = ($ecount || $err) ? 'backup failed' : 'backup successful';
    if ($err) {
	if ($err =~ /\n/) {
	    $stat .= ": multiple problems";
	} else {
	    # single-line error fits into the subject; clear it so it is not
	    # repeated in the body
	    $stat .= ": $err";
	    $err = undef;
	}
    }

    my $hostname = `hostname -f` || PVE::INotify::nodename();
    chomp $hostname;

    # text part
    my $text = $err ? "$err\n\n" : '';
    my $namelength = 20;
    $text .= sprintf(
	"%-10s %-${namelength}s %-6s %10s %10s %s\n",
	qw(VMID NAME STATUS TIME SIZE FILENAME)
    );
    foreach my $task (@$tasklist) {
	my $name = substr($task->{hostname}, 0, $namelength);
	my $successful = $task->{state} eq 'ok';
	my $size = $successful ? format_size($task->{size}) : 0;
	my $filename = $successful ? $task->{target} : '-';
	# failed tasks show a numeric MB column instead of the formatted size
	my $size_fmt = $successful ? "%10s" : "%8.2fMB";
	$text .= sprintf(
	    "%-10s %-${namelength}s %-6s %10s $size_fmt %s\n",
	    $task->{vmid},
	    $name,
	    $task->{state},
	    format_time($task->{backuptime}),
	    $size,
	    $filename,
	);
    }

    # detailed per-guest logs for the text part (may be dropped later when
    # the mail would grow past MAX_MAIL_SIZE)
    my $text_log_part;
    $text_log_part .= "\nDetailed backup logs:\n\n";
    $text_log_part .= "$cmdline\n\n";

    $text_log_part .= $detail_pre . "\n" if defined($detail_pre);
    foreach my $task (@$tasklist) {
	my $vmid = $task->{vmid};
	my $log = $task->{tmplog};
	if (!$log) {
	    $text_log_part .= "$vmid: no log available\n\n";
	    next;
	}
	if (open(my $TMP, '<', "$log")) {
	    while (my $line = <$TMP>) {
		next if $line =~ /^status: \d+/; # not useful in mails
		$text_log_part .= encode8bit("$vmid: $line");
	    }
	    close($TMP);
	} else {
	    $text_log_part .= "$vmid: Could not open log file\n\n";
	}
	$text_log_part .= "\n";
    }
    $text_log_part .= $detail_post if defined($detail_post);

    # html part
    my $html = "<html><body>\n";
    $html .= "<p>" . (escape_html($err) =~ s/\n/<br>/gr) . "</p>\n" if $err;
    $html .= "<table border=1 cellpadding=3>\n";
    $html .= "<tr><td>VMID<td>NAME<td>STATUS<td>TIME<td>SIZE<td>FILENAME</tr>\n";

    my $ssize = 0;
    foreach my $task (@$tasklist) {
	my $vmid = $task->{vmid};
	my $name = $task->{hostname};

	if ($task->{state} eq 'ok') {
	    $ssize += $task->{size};
	    $html .= sprintf(
		"<tr><td>%s<td>%s<td>OK<td>%s<td align=right>%s<td>%s</tr>\n",
		$vmid,
		$name,
		format_time($task->{backuptime}),
		format_size($task->{size}),
		escape_html($task->{target}),
	    );
	} else {
	    $html .= sprintf(
		"<tr><td>%s<td>%s<td><font color=red>FAILED<td>%s<td colspan=2>%s</tr>\n",
		$vmid,
		$name,
		format_time($task->{backuptime}),
		escape_html($task->{msg}),
	    );
	}
    }

    $html .= sprintf("<tr><td align=left colspan=3>TOTAL<td>%s<td>%s<td></tr>",
	format_time($totaltime), format_size($ssize));

    $html .= "\n</table><br><br>\n";
    my $html_log_part;
    $html_log_part .= "Detailed backup logs:<br /><br />\n";
    $html_log_part .= "<pre>\n";
    $html_log_part .= escape_html($cmdline) . "\n\n";

    $html_log_part .= escape_html($detail_pre) . "\n" if defined($detail_pre);
    foreach my $task (@$tasklist) {
	my $vmid = $task->{vmid};
	my $log = $task->{tmplog};
	if (!$log) {
	    $html_log_part .= "$vmid: no log available\n\n";
	    next;
	}
	if (open(my $TMP, '<', "$log")) {
	    while (my $line = <$TMP>) {
		next if $line =~ /^status: \d+/; # not useful in mails
		# highlight ERROR/WARN lines in red
		if ($line =~ m/^\S+\s\d+\s+\d+:\d+:\d+\s+(ERROR|WARN):/) {
		    $html_log_part .= encode8bit("$vmid: <font color=red>" .
			escape_html($line) . "</font>");
		} else {
		    $html_log_part .= encode8bit("$vmid: " . escape_html($line));
		}
	    }
	    close($TMP);
	} else {
	    $html_log_part .= "$vmid: Could not open log file\n\n";
	}
	$html_log_part .= "\n";
    }
    $html_log_part .= escape_html($detail_post) if defined($detail_post);
    $html_log_part .= "</pre>";
    my $html_end = "\n</body></html>\n";
    # end html part

    # include the detailed logs only if the total mail stays below the limit
    if (length($text) + length($text_log_part) +
	length($html) + length($html_log_part) +
	length($html_end) < MAX_MAIL_SIZE)
    {
	$html .= $html_log_part;
	$html .= $html_end;
	$text .= $text_log_part;
    } else {
	my $msg = "Log output was too long to be sent by mail. " .
	    "See Task History for details!\n";
	$text .= $msg;
	$html .= "<p>$msg</p>";
	$html .= $html_end;
    }

    my $subject = "vzdump backup status ($hostname) : $stat";

    my $dcconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
    my $mailfrom = $dcconf->{email_from} || "root";

    PVE::Tools::sendmail($mailto, $subject, $text, $html, $mailfrom, "vzdump backup tool");
};
# Construct a VZDump job object: verifies required external tools, merges
# config-file defaults into $opts, instantiates the loaded plugins, and
# resolves/validates the backup target (storage vs. dumpdir).
# Dies (after attempting a notification mail) when the target is unusable.
sub new {
    my ($class, $cmdline, $opts, $skiplist) = @_;

    mkpath $logdir;

    # external tools vzdump relies on; fail early if any is missing
    check_bin('cp');
    check_bin('df');
    check_bin('sendmail');
    check_bin('rsync');
    check_bin('tar');
    check_bin('mount');
    check_bin('umount');
    check_bin('cstream');
    check_bin('ionice');

    if ($opts->{mode} && $opts->{mode} eq 'snapshot') {
	check_bin('lvcreate');
	check_bin('lvs');
	check_bin('lvremove');
    }

    my $defaults = read_vzdump_defaults();

    foreach my $k (keys %$defaults) {
	next if $k eq 'exclude-path' || $k eq 'prune-backups'; # dealt with separately
	if ($k eq 'dumpdir' || $k eq 'storage') {
	    # the default target only applies when neither was given explicitly
	    $opts->{$k} = $defaults->{$k} if !defined($opts->{dumpdir}) &&
		!defined($opts->{storage});
	} else {
	    $opts->{$k} = $defaults->{$k} if !defined($opts->{$k});
	}
    }

    # normalize away trailing slashes
    $opts->{dumpdir} =~ s|/+$|| if ($opts->{dumpdir});
    $opts->{tmpdir} =~ s|/+$|| if ($opts->{tmpdir});

    $skiplist = [] if !$skiplist;
    my $self = bless {
	cmdline => $cmdline,
	opts => $opts,
	skiplist => $skiplist,
    }, $class;

    # combine default and explicit exclude paths for the 'find' exclusions
    my $findexcl = $self->{findexcl} = [];
    if ($defaults->{'exclude-path'}) {
	push @$findexcl, @{$defaults->{'exclude-path'}};
    }
    if ($opts->{'exclude-path'}) {
	push @$findexcl, @{$opts->{'exclude-path'}};
    }
    if ($opts->{stdexcludes}) {
	push @$findexcl,
	    '/tmp/?*',
	    '/var/tmp/?*',
	    '/var/run/?*.pid',
	    ;
    }

    foreach my $p (@plugins) {
	my $pd = $p->new($self);
	push @{$self->{plugins}}, $pd;
    }

    # mutually exclusive target options
    if (defined($opts->{storage}) && $opts->{stdout}) {
	die "cannot use options 'storage' and 'stdout' at the same time\n";
    } elsif (defined($opts->{storage}) && defined($opts->{dumpdir})) {
	die "cannot use options 'storage' and 'dumpdir' at the same time\n";
    }

    if (!$opts->{dumpdir} && !$opts->{storage}) {
	$opts->{storage} = 'local';
    }

    # collect (non-fatal until the end) target validation errors
    my $errors = '';
    if ($opts->{storage}) {
	my $storage_cfg = PVE::Storage::config();
	eval { PVE::Storage::activate_storage($storage_cfg, $opts->{storage}) };
	if (my $err = $@) {
	    chomp($err);
	    $errors .= "could not activate storage '$opts->{storage}': $err";
	}

	my $info = eval { storage_info($opts->{storage}) };
	if (my $err = $@) {
	    chomp($err);
	    $errors .= "could not get storage information for '$opts->{storage}': $err";
	} else {
	    $opts->{dumpdir} = $info->{dumpdir};
	    $opts->{scfg} = $info->{scfg};
	    $opts->{pbs} = $info->{pbs};
	    $opts->{'prune-backups'} //= $info->{'prune-backups'};
	}
    } elsif ($opts->{dumpdir}) {
	$errors .= "dumpdir '$opts->{dumpdir}' does not exist"
	    if ! -d $opts->{dumpdir};
    } else {
	die "internal error";
    }

    $opts->{'prune-backups'} //= $defaults->{'prune-backups'};

    # avoid triggering any remove code path if keep-all is set
    $opts->{remove} = 0 if $opts->{'prune-backups'}->{'keep-all'};

    if ($opts->{tmpdir} && ! -d $opts->{tmpdir}) {
	$errors .= "\n" if $errors;
	$errors .= "tmpdir '$opts->{tmpdir}' does not exist";
    }

    if ($errors) {
	# best effort: try to notify by mail before aborting
	eval { $self->sendmail([], 0, $errors); };
	debugmsg('err', $@) if $@;
	die "$errors\n";
    }

    return $self;
}
# Determine which filesystem $dir resides on by parsing 'df -P -T' output.
# Returns { device, fstype, mountpoint } for the last matching line, or
# undef when df fails or its output cannot be parsed.
sub get_mount_info {
    my ($dir) = @_;

    # Note: df 'available' can be negative, and percentage set to '-'

    my $cmd = ['df', '-P', '-T', '-B', '1', $dir];

    my $res;

    my $parser = sub {
	my $line = shift;
	# columns: device fstype total used available use% mountpoint
	# (the use% capture is intentionally discarded via undef)
	if (my ($fsid, $fstype, undef, $mp) = $line =~
	    m!(\S+.*)\s+(\S+)\s+\d+\s+\-?\d+\s+\d+\s+(\d+%|-)\s+(/.*)$!) {
	    $res = {
		device => $fsid,
		fstype => $fstype,
		mountpoint => $mp,
	    };
	}
    };

    # df errors are not fatal here - we just return undef
    eval { PVE::Tools::run_command($cmd, errfunc => sub {}, outfunc => $parser); };
    warn $@ if $@;

    return $res;
}
# Acquire the global vzdump lock for this node. Tries a non-blocking flock
# first; when that fails and a lockwait time is configured, blocks for up
# to lockwait minutes (enforced via alarm()). On success the task's $upid
# is recorded in $pidfile and the open lock handle is returned - the lock
# is held for as long as the caller keeps that handle open.
sub getlock {
    my ($self, $upid) = @_;

    # NOTE(review): $fh appears unused below - looks like a leftover; verify
    my $fh;

    my $maxwait = $self->{opts}->{lockwait} || $self->{lockwait};

    die "missing UPID" if !$upid; # should not happen

    my $SERVER_FLCK;
    if (!open($SERVER_FLCK, '>>', "$lockfile")) {
	debugmsg('err', "can't open lock on file '$lockfile' - $!", undef, 1);
	die "can't open lock on file '$lockfile' - $!";
    }

    if (!flock($SERVER_FLCK, LOCK_EX|LOCK_NB)) {
	if (!$maxwait) {
	    debugmsg('err', "can't acquire lock '$lockfile' (wait = 0)", undef, 1);
	    die "can't acquire lock '$lockfile' (wait = 0)";
	}

	debugmsg('info', "trying to get global lock - waiting...", undef, 1);
	eval {
	    # abort the blocking flock after $maxwait minutes
	    alarm($maxwait * 60);

	    local $SIG{ALRM} = sub { alarm(0); die "got timeout\n"; };

	    if (!flock($SERVER_FLCK, LOCK_EX)) {
		# save errno before close() can clobber it
		my $err = $!;
		close($SERVER_FLCK);
		alarm(0);
		die "$err\n";
	    }
	    alarm(0);
	};
	alarm(0); # make sure no pending alarm survives the eval

	my $err = $@;

	if ($err) {
	    debugmsg('err', "can't acquire lock '$lockfile' - $err", undef, 1);
	    die "can't acquire lock '$lockfile' - $err";
	}

	debugmsg('info', "got global lock", undef, 1);
    }

    PVE::Tools::file_set_contents($pidfile, $upid);

    return $SERVER_FLCK;
}
# Execute the user-configured hook script (if any) for the given backup
# $phase. Task-specific values are exposed via environment variables; the
# command is passed as an argument list (not a shell string) so the script
# path cannot trigger shell interpretation.
sub run_hook_script {
    my ($self, $phase, $task, $logfd) = @_;

    my $opts = $self->{opts};

    my $script = $opts->{script};
    return if !$script;

    if (! -x $script) {
	die "The hook script '$script' is not executable.\n";
    }

    my $cmd = [$script, $phase];

    if ($task) {
	push @$cmd, $task->{mode};
	push @$cmd, $task->{vmid};
    }

    # start from a clean environment for the hook script
    local %ENV;
    # set immutable opts directly (so they are available in all phases)
    $ENV{STOREID} = $opts->{storage} if $opts->{storage};
    $ENV{DUMPDIR} = $opts->{dumpdir} if $opts->{dumpdir};

    foreach my $ek (qw(vmtype hostname target logfile)) {
	$ENV{uc($ek)} = $task->{$ek} if $task->{$ek};
    }

    # FIXME: for backwards compatibility - drop with PVE 7.0
    $ENV{TARFILE} = $task->{target} if $task->{target};

    run_command($logfd, $cmd);
}
2012-02-07 10:29:57 +01:00
# Map the 'compress' option to a (compressor command, file extension) pair.
# Returns undef when compression is disabled. For pigz (thread count 1) and
# zstd (thread count 0), half of the available cores are used instead.
sub compressor_info {
    my ($opts) = @_;

    my $compress = $opts->{compress};

    if (!$compress || $compress eq '0') {
	return undef;
    }

    if ($compress eq '1' || $compress eq 'lzo') {
	return ('lzop', 'lzo');
    }

    if ($compress eq 'gzip') {
	if ($opts->{pigz} > 0) {
	    my $threads = $opts->{pigz};
	    if ($threads == 1) {
		my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
		$threads = int(($cpuinfo->{cpus} + 1) / 2);
	    }
	    return ("pigz -p ${threads} --rsyncable", 'gz');
	}
	return ('gzip --rsyncable', 'gz');
    }

    if ($compress eq 'zstd') {
	my $threads = $opts->{zstd} // 1;
	if ($threads == 0) {
	    my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
	    $threads = int(($cpuinfo->{cpus} + 1) / 2);
	}
	return ("zstd --rsyncable --threads=${threads}", 'zst');
    }

    die "internal error - unknown compression option '$compress'";
}
2012-03-23 09:56:41 +01:00
# List existing backup archives matching "$bkname-*" inside $dir, skipping
# $exclude_fn when given. Only archives with a standard vzdump name (as
# recognized by PVE::Storage::archive_info) are included; each entry is a
# hash with 'path' and 'ctime'. Returns an array reference.
sub get_backup_file_list {
    my ($dir, $bkname, $exclude_fn) = @_;

    my $bklist = [];
    for my $fn (<$dir/${bkname}-*>) {
	next if $exclude_fn && $fn eq $exclude_fn;

	my $archive_info = eval { PVE::Storage::archive_info($fn) } // {};
	next if !$archive_info->{is_std_name};

	push @$bklist, {
	    'path' => "$dir/$archive_info->{filename}",
	    'ctime' => $archive_info->{ctime},
	};
    }

    return $bklist;
}
2019-06-19 12:08:36 +02:00
2011-10-12 08:25:18 +02:00
sub exec_backup_task {
my ( $ self , $ task ) = @ _ ;
2019-06-19 12:08:36 +02:00
2011-10-12 08:25:18 +02:00
my $ opts = $ self - > { opts } ;
2020-09-29 10:37:04 +02:00
my $ cfg = PVE::Storage:: config ( ) ;
2011-10-12 08:25:18 +02:00
my $ vmid = $ task - > { vmid } ;
my $ plugin = $ task - > { plugin } ;
2020-02-19 13:59:43 +01:00
$ task - > { backup_time } = time ( ) ;
my $ pbs_group_name ;
my $ pbs_snapshot_name ;
2011-10-12 08:25:18 +02:00
my $ vmstarttime = time ( ) ;
2019-06-19 12:08:36 +02:00
2011-10-12 08:25:18 +02:00
my $ logfd ;
my $ cleanup = { } ;
2019-05-08 17:23:37 +00:00
my $ log_vm_online_again = sub {
return if ! defined ( $ task - > { vmstoptime } ) ;
$ task - > { vmconttime } // = time ( ) ;
my $ delay = $ task - > { vmconttime } - $ task - > { vmstoptime } ;
2020-07-14 10:26:14 +02:00
$ delay = '<1' if $ delay < 1 ;
2019-05-08 17:23:37 +00:00
debugmsg ( 'info' , "guest is online again after $delay seconds" , $ logfd ) ;
} ;
2011-10-12 08:25:18 +02:00
eval {
die "unable to find VM '$vmid'\n" if ! $ plugin ;
2020-10-22 12:30:12 +02:00
my $ vmtype = $ plugin - > type ( ) ;
if ( $ self - > { opts } - > { pbs } ) {
if ( $ vmtype eq 'lxc' ) {
$ pbs_group_name = "ct/$vmid" ;
} elsif ( $ vmtype eq 'qemu' ) {
$ pbs_group_name = "vm/$vmid" ;
} else {
die "pbs backup not implemented for plugin type '$vmtype'\n" ;
}
my $ btime = strftime ( "%FT%TZ" , gmtime ( $ task - > { backup_time } ) ) ;
$ pbs_snapshot_name = "$pbs_group_name/$btime" ;
}
2015-09-18 11:21:03 +02:00
# for now we deny backups of a running ha managed service in *stop* mode
2016-11-22 14:45:02 +01:00
# as it interferes with the HA stack (started services should not stop).
2015-09-18 11:21:03 +02:00
if ( $ opts - > { mode } eq 'stop' &&
2016-11-22 14:45:02 +01:00
PVE::HA::Config:: vm_is_ha_managed ( $ vmid , 'started' ) )
2015-09-18 11:21:03 +02:00
{
die "Cannot execute a backup with stop mode on a HA managed and" .
" enabled Service. Use snapshot mode or disable the Service.\n" ;
}
2011-10-12 08:25:18 +02:00
my $ tmplog = "$logdir/$vmtype-$vmid.log" ;
my $ bkname = "vzdump-$vmtype-$vmid" ;
2020-02-19 13:59:43 +01:00
my $ basename = $ bkname . strftime ( "-%Y_%m_%d-%H_%M_%S" , localtime ( $ task - > { backup_time } ) ) ;
2011-10-12 08:25:18 +02:00
2020-09-29 10:37:04 +02:00
my $ prune_options = $ opts - > { 'prune-backups' } ;
my $ backup_limit = 0 ;
2020-11-23 15:54:15 +01:00
if ( ! $ prune_options - > { 'keep-all' } ) {
2020-11-23 13:33:10 +01:00
foreach my $ keep ( values % { $ prune_options } ) {
$ backup_limit += $ keep ;
}
2020-09-29 10:37:04 +02:00
}
2012-03-23 09:56:41 +01:00
2020-09-29 10:37:04 +02:00
if ( $ backup_limit && ! $ opts - > { remove } ) {
2020-02-19 13:59:43 +01:00
my $ count ;
2020-05-06 10:57:52 +02:00
if ( $ self - > { opts } - > { pbs } ) {
2020-02-19 13:59:43 +01:00
my $ res = PVE::Storage::PBSPlugin:: run_client_cmd ( $ opts - > { scfg } , $ opts - > { storage } , 'snapshots' , $ pbs_group_name ) ;
$ count = scalar ( @$ res ) ;
} else {
my $ bklist = get_backup_file_list ( $ opts - > { dumpdir } , $ bkname ) ;
$ count = scalar ( @$ bklist ) ;
}
2020-09-29 10:37:04 +02:00
die "There is a max backup limit of $backup_limit enforced by the" .
2020-02-19 13:59:43 +01:00
" target storage or the vzdump parameters." .
" Either increase the limit or delete old backup(s).\n"
2020-09-29 10:37:04 +02:00
if $ count >= $ backup_limit ;
2012-03-23 09:56:41 +01:00
}
2020-05-06 10:57:52 +02:00
if ( ! $ self - > { opts } - > { pbs } ) {
2020-02-19 13:59:43 +01:00
$ task - > { logfile } = "$opts->{dumpdir}/$basename.log" ;
}
2011-10-12 08:25:18 +02:00
2012-12-13 12:55:09 +01:00
my $ ext = $ vmtype eq 'qemu' ? '.vma' : '.tar' ;
2015-07-22 18:14:50 +02:00
my ( $ comp , $ comp_ext ) = compressor_info ( $ opts ) ;
2012-02-07 10:29:57 +01:00
if ( $ comp && $ comp_ext ) {
$ ext . = ".${comp_ext}" ;
}
2011-10-12 08:25:18 +02:00
2020-05-06 10:57:52 +02:00
if ( $ self - > { opts } - > { pbs } ) {
2020-02-19 13:59:43 +01:00
die "unable to pipe backup to stdout\n" if $ opts - > { stdout } ;
2020-07-03 16:49:19 +02:00
$ task - > { target } = $ pbs_snapshot_name ;
2011-10-12 08:25:18 +02:00
} else {
2020-02-19 13:59:43 +01:00
if ( $ opts - > { stdout } ) {
2020-07-03 16:45:19 +02:00
$ task - > { target } = '-' ;
2020-02-19 13:59:43 +01:00
} else {
2020-07-03 16:45:19 +02:00
$ task - > { target } = $ task - > { tmptar } = "$opts->{dumpdir}/$basename$ext" ;
2020-02-19 13:59:43 +01:00
$ task - > { tmptar } =~ s/\.[^\.]+$/\.dat/ ;
unlink $ task - > { tmptar } ;
}
2011-10-12 08:25:18 +02:00
}
$ task - > { vmtype } = $ vmtype ;
2020-10-19 16:15:25 +02:00
my $ pid = $$ ;
2020-08-06 16:03:03 +02:00
if ( $ opts - > { tmpdir } ) {
2020-10-19 16:15:25 +02:00
$ task - > { tmpdir } = "$opts->{tmpdir}/vzdumptmp${pid}_$vmid/" ;
2020-08-06 16:03:03 +02:00
} elsif ( $ self - > { opts } - > { pbs } ) {
2020-10-19 16:15:25 +02:00
$ task - > { tmpdir } = "/var/tmp/vzdumptmp${pid}_$vmid" ;
2011-10-12 08:25:18 +02:00
} else {
# dumpdir is posix? then use it as temporary dir
2011-11-14 09:26:10 +01:00
my $ info = get_mount_info ( $ opts - > { dumpdir } ) ;
2019-06-19 12:08:36 +02:00
if ( $ vmtype eq 'qemu' ||
2011-10-12 08:25:18 +02:00
grep ( $ _ eq $ info - > { fstype } , @ posix_filesystems ) ) {
$ task - > { tmpdir } = "$opts->{dumpdir}/$basename.tmp" ;
} else {
2020-10-19 16:15:25 +02:00
$ task - > { tmpdir } = "/var/tmp/vzdumptmp${pid}_$vmid" ;
2011-10-12 08:25:18 +02:00
debugmsg ( 'info' , "filesystem type on dumpdir is '$info->{fstype}' -" .
"using $task->{tmpdir} for temporary files" , $ logfd ) ;
}
}
rmtree $ task - > { tmpdir } ;
mkdir $ task - > { tmpdir } ;
- d $ task - > { tmpdir } ||
die "unable to create temporary directory '$task->{tmpdir}'" ;
$ logfd = IO::File - > new ( ">$tmplog" ) ||
die "unable to create log file '$tmplog'" ;
$ task - > { dumpdir } = $ opts - > { dumpdir } ;
2013-09-10 12:40:10 -05:00
$ task - > { storeid } = $ opts - > { storage } ;
2020-02-19 13:59:43 +01:00
$ task - > { scfg } = $ opts - > { scfg } ;
2011-10-12 08:25:18 +02:00
$ task - > { tmplog } = $ tmplog ;
2020-02-19 13:59:43 +01:00
unlink $ task - > { logfile } if defined ( $ task - > { logfile } ) ;
2011-10-12 08:25:18 +02:00
2019-04-23 09:04:49 +02:00
debugmsg ( 'info' , "Starting Backup of VM $vmid ($vmtype)" , $ logfd , 1 ) ;
debugmsg ( 'info' , "Backup started at " . strftime ( "%F %H:%M:%S" , localtime ( ) ) ) ;
2011-10-12 08:25:18 +02:00
$ plugin - > set_logfd ( $ logfd ) ;
# test is VM is running
my ( $ running , $ status_text ) = $ plugin - > vm_status ( $ vmid ) ;
debugmsg ( 'info' , "status = ${status_text}" , $ logfd ) ;
# lock VM (prevent config changes)
$ plugin - > lock_vm ( $ vmid ) ;
$ cleanup - > { unlock } = 1 ;
# prepare
2016-08-18 09:35:00 +02:00
my $ mode = $ running ? $ task - > { mode } : 'stop' ;
2011-10-12 08:25:18 +02:00
if ( $ mode eq 'snapshot' ) {
my % saved_task = %$ task ;
eval { $ plugin - > prepare ( $ task , $ vmid , $ mode ) ; } ;
if ( my $ err = $@ ) {
die $ err if $ err !~ m/^mode failure/ ;
debugmsg ( 'info' , $ err , $ logfd ) ;
debugmsg ( 'info' , "trying 'suspend' mode instead" , $ logfd ) ;
$ mode = 'suspend' ; # so prepare is called again below
2019-06-19 12:08:36 +02:00
%$ task = % saved_task ;
2011-10-12 08:25:18 +02:00
}
}
2016-08-18 09:35:00 +02:00
$ cleanup - > { prepared } = 1 ;
2011-10-12 08:25:18 +02:00
$ task - > { mode } = $ mode ;
debugmsg ( 'info' , "backup mode: $mode" , $ logfd ) ;
debugmsg ( 'info' , "bandwidth limit: $opts->{bwlimit} KB/s" , $ logfd )
if $ opts - > { bwlimit } ;
debugmsg ( 'info' , "ionice priority: $opts->{ionice}" , $ logfd ) ;
if ( $ mode eq 'stop' ) {
$ plugin - > prepare ( $ task , $ vmid , $ mode ) ;
$ self - > run_hook_script ( 'backup-start' , $ task , $ logfd ) ;
if ( $ running ) {
debugmsg ( 'info' , "stopping vm" , $ logfd ) ;
2019-05-08 17:23:37 +00:00
$ task - > { vmstoptime } = time ( ) ;
2011-10-12 08:25:18 +02:00
$ self - > run_hook_script ( 'pre-stop' , $ task , $ logfd ) ;
$ plugin - > stop_vm ( $ task , $ vmid ) ;
$ cleanup - > { restart } = 1 ;
}
2019-06-19 12:08:36 +02:00
2011-10-12 08:25:18 +02:00
} elsif ( $ mode eq 'suspend' ) {
$ plugin - > prepare ( $ task , $ vmid , $ mode ) ;
$ self - > run_hook_script ( 'backup-start' , $ task , $ logfd ) ;
2015-04-29 09:11:44 +02:00
if ( $ vmtype eq 'lxc' ) {
# pre-suspend rsync
$ plugin - > copy_data_phase1 ( $ task , $ vmid ) ;
}
2020-07-14 10:26:47 +02:00
debugmsg ( 'info' , "suspending guest" , $ logfd ) ;
2019-05-08 17:23:37 +00:00
$ task - > { vmstoptime } = time ( ) ;
2011-10-12 08:25:18 +02:00
$ self - > run_hook_script ( 'pre-stop' , $ task , $ logfd ) ;
$ plugin - > suspend_vm ( $ task , $ vmid ) ;
$ cleanup - > { resume } = 1 ;
2015-04-29 09:11:44 +02:00
if ( $ vmtype eq 'lxc' ) {
# post-suspend rsync
$ plugin - > copy_data_phase2 ( $ task , $ vmid ) ;
2020-07-14 10:26:47 +02:00
debugmsg ( 'info' , "resuming guest" , $ logfd ) ;
2015-04-29 09:11:44 +02:00
$ cleanup - > { resume } = 0 ;
$ self - > run_hook_script ( 'pre-restart' , $ task , $ logfd ) ;
$ plugin - > resume_vm ( $ task , $ vmid ) ;
2016-04-22 10:35:29 +02:00
$ self - > run_hook_script ( 'post-restart' , $ task , $ logfd ) ;
2019-05-08 17:23:37 +00:00
$ log_vm_online_again - > ( ) ;
2015-04-29 09:11:44 +02:00
}
2019-06-19 12:08:36 +02:00
2011-10-12 08:25:18 +02:00
} elsif ( $ mode eq 'snapshot' ) {
2012-10-12 06:01:42 +02:00
$ self - > run_hook_script ( 'backup-start' , $ task , $ logfd ) ;
2011-10-12 08:25:18 +02:00
my $ snapshot_count = $ task - > { snapshot_count } || 0 ;
$ self - > run_hook_script ( 'pre-stop' , $ task , $ logfd ) ;
if ( $ snapshot_count > 1 ) {
debugmsg ( 'info' , "suspend vm to make snapshot" , $ logfd ) ;
2019-05-08 17:23:37 +00:00
$ task - > { vmstoptime } = time ( ) ;
2011-10-12 08:25:18 +02:00
$ plugin - > suspend_vm ( $ task , $ vmid ) ;
$ cleanup - > { resume } = 1 ;
}
$ plugin - > snapshot ( $ task , $ vmid ) ;
$ self - > run_hook_script ( 'pre-restart' , $ task , $ logfd ) ;
if ( $ snapshot_count > 1 ) {
debugmsg ( 'info' , "resume vm" , $ logfd ) ;
$ cleanup - > { resume } = 0 ;
$ plugin - > resume_vm ( $ task , $ vmid ) ;
2019-05-08 17:23:37 +00:00
$ log_vm_online_again - > ( ) ;
2011-10-12 08:25:18 +02:00
}
2016-04-22 10:35:29 +02:00
$ self - > run_hook_script ( 'post-restart' , $ task , $ logfd ) ;
2011-10-12 08:25:18 +02:00
} else {
die "internal error - unknown mode '$mode'\n" ;
}
# assemble archive image
$ plugin - > assemble ( $ task , $ vmid ) ;
2019-06-19 12:08:36 +02:00
# produce archive
2011-10-12 08:25:18 +02:00
if ( $ opts - > { stdout } ) {
debugmsg ( 'info' , "sending archive to stdout" , $ logfd ) ;
2012-02-07 10:29:57 +01:00
$ plugin - > archive ( $ task , $ vmid , $ task - > { tmptar } , $ comp ) ;
2011-10-12 08:25:18 +02:00
$ self - > run_hook_script ( 'backup-end' , $ task , $ logfd ) ;
return ;
}
2020-07-03 16:49:19 +02:00
my $ archive_txt = $ self - > { opts } - > { pbs } ? 'Proxmox Backup Server' : 'vzdump' ;
debugmsg ( 'info' , "creating $archive_txt archive '$task->{target}'" , $ logfd ) ;
2012-02-07 10:29:57 +01:00
$ plugin - > archive ( $ task , $ vmid , $ task - > { tmptar } , $ comp ) ;
2011-10-12 08:25:18 +02:00
2020-05-06 10:57:52 +02:00
if ( $ self - > { opts } - > { pbs } ) {
2020-07-06 22:03:40 +02:00
# size is added to task struct in guest vzdump plugins
2020-02-19 13:59:43 +01:00
} else {
2020-07-03 16:45:19 +02:00
rename ( $ task - > { tmptar } , $ task - > { target } ) ||
die "unable to rename '$task->{tmptar}' to '$task->{target}'\n" ;
2011-10-12 08:25:18 +02:00
2020-02-19 13:59:43 +01:00
# determine size
2020-07-03 16:45:19 +02:00
$ task - > { size } = ( - s $ task - > { target } ) || 0 ;
2020-02-19 13:59:43 +01:00
my $ cs = format_size ( $ task - > { size } ) ;
debugmsg ( 'info' , "archive file size: $cs" , $ logfd ) ;
}
2011-10-12 08:25:18 +02:00
# purge older backup
2020-09-29 10:37:04 +02:00
if ( $ opts - > { remove } ) {
2020-09-29 10:37:05 +02:00
if ( ! defined ( $ opts - > { storage } ) ) {
my $ bklist = get_backup_file_list ( $ opts - > { dumpdir } , $ bkname , $ task - > { target } ) ;
PVE::Storage:: prune_mark_backup_group ( $ bklist , $ prune_options ) ;
2020-09-29 10:37:04 +02:00
2020-09-29 10:37:05 +02:00
foreach my $ prune_entry ( @ { $ bklist } ) {
next if $ prune_entry - > { mark } ne 'remove' ;
my $ archive_path = $ prune_entry - > { path } ;
debugmsg ( 'info' , "delete old backup '$archive_path'" , $ logfd ) ;
PVE::Storage:: archive_remove ( $ archive_path ) ;
2020-02-19 13:59:43 +01:00
}
2020-09-29 10:37:05 +02:00
} else {
2020-09-29 10:37:04 +02:00
my $ logfunc = sub { debugmsg ( $ _ [ 0 ] , $ _ [ 1 ] , $ logfd ) } ;
PVE::Storage:: prune_backups ( $ cfg , $ opts - > { storage } , $ prune_options , $ vmid , $ vmtype , 0 , $ logfunc ) ;
2011-10-12 08:25:18 +02:00
}
}
$ self - > run_hook_script ( 'backup-end' , $ task , $ logfd ) ;
} ;
my $ err = $@ ;
if ( $ plugin ) {
# clean-up
if ( $ cleanup - > { unlock } ) {
eval { $ plugin - > unlock_vm ( $ vmid ) ; } ;
warn $@ if $@ ;
}
2016-08-18 09:35:00 +02:00
if ( $ cleanup - > { prepared } ) {
2015-10-03 15:30:45 +02:00
# only call cleanup when necessary (when prepare was executed)
eval { $ plugin - > cleanup ( $ task , $ vmid ) } ;
warn $@ if $@ ;
}
2011-10-12 08:25:18 +02:00
eval { $ plugin - > set_logfd ( undef ) ; } ;
warn $@ if $@ ;
2019-06-19 12:08:36 +02:00
if ( $ cleanup - > { resume } || $ cleanup - > { restart } ) {
eval {
2011-10-12 08:25:18 +02:00
$ self - > run_hook_script ( 'pre-restart' , $ task , $ logfd ) ;
if ( $ cleanup - > { resume } ) {
debugmsg ( 'info' , "resume vm" , $ logfd ) ;
$ plugin - > resume_vm ( $ task , $ vmid ) ;
} else {
2012-12-13 12:55:09 +01:00
my $ running = $ plugin - > vm_status ( $ vmid ) ;
if ( ! $ running ) {
debugmsg ( 'info' , "restarting vm" , $ logfd ) ;
$ plugin - > start_vm ( $ task , $ vmid ) ;
}
2016-04-22 10:35:29 +02:00
}
$ self - > run_hook_script ( 'post-restart' , $ task , $ logfd ) ;
2011-10-12 08:25:18 +02:00
} ;
my $ err = $@ ;
if ( $ err ) {
warn $ err ;
} else {
2019-05-08 17:23:37 +00:00
$ log_vm_online_again - > ( ) ;
2011-10-12 08:25:18 +02:00
}
}
}
eval { unlink $ task - > { tmptar } if $ task - > { tmptar } && - f $ task - > { tmptar } ; } ;
warn $@ if $@ ;
2013-10-04 06:29:17 +02:00
eval { rmtree $ task - > { tmpdir } if $ task - > { tmpdir } && - d $ task - > { tmpdir } ; } ;
2011-10-12 08:25:18 +02:00
warn $@ if $@ ;
my $ delay = $ task - > { backuptime } = time ( ) - $ vmstarttime ;
if ( $ err ) {
$ task - > { state } = 'err' ;
$ task - > { msg } = $ err ;
debugmsg ( 'err' , "Backup of VM $vmid failed - $err" , $ logfd , 1 ) ;
2019-04-23 09:04:49 +02:00
debugmsg ( 'info' , "Failed at " . strftime ( "%F %H:%M:%S" , localtime ( ) ) ) ;
2011-10-12 08:25:18 +02:00
eval { $ self - > run_hook_script ( 'backup-abort' , $ task , $ logfd ) ; } ;
} else {
$ task - > { state } = 'ok' ;
my $ tstr = format_time ( $ delay ) ;
debugmsg ( 'info' , "Finished Backup of VM $vmid ($tstr)" , $ logfd , 1 ) ;
2019-04-23 09:04:49 +02:00
debugmsg ( 'info' , "Backup finished at " . strftime ( "%F %H:%M:%S" , localtime ( ) ) ) ;
2011-10-12 08:25:18 +02:00
}
close ( $ logfd ) if $ logfd ;
2019-06-19 12:08:36 +02:00
2020-02-19 13:59:43 +01:00
if ( $ task - > { tmplog } ) {
2020-05-06 10:57:52 +02:00
if ( $ self - > { opts } - > { pbs } ) {
2020-02-19 13:59:43 +01:00
if ( $ task - > { state } eq 'ok' ) {
2021-02-25 13:59:42 +01:00
eval {
PVE::Storage::PBSPlugin:: run_raw_client_cmd (
$ opts - > { scfg } ,
$ opts - > { storage } ,
'upload-log' ,
[ $ pbs_snapshot_name , $ task - > { tmplog } ] ,
errmsg = > "uploading backup task log failed" ,
2021-10-11 09:14:23 +02:00
outfunc = > sub { } ,
2021-02-25 13:59:42 +01:00
) ;
} ;
debugmsg ( 'warn' , "$@" ) if $@ ; # $@ contains already error prefix
2020-02-19 13:59:43 +01:00
}
} elsif ( $ task - > { logfile } ) {
system { 'cp' } 'cp' , $ task - > { tmplog } , $ task - > { logfile } ;
}
2011-10-12 08:25:18 +02:00
}
eval { $ self - > run_hook_script ( 'log-end' , $ task ) ; } ;
die $ err if $ err && $ err =~ m/^interrupted by signal$/ ;
}
# Run the whole backup job: build the per-guest task list, execute every task,
# drive the job-level hook scripts ('job-start'/'job-end'/'job-abort'), and
# finally send the notification mail with the collected logs.
# Parameters:
#   $rpcenv   - RPC environment, used for per-guest permission checks
#   $authuser - user the job runs as (checked for VM.Backup on each guest)
# Dies if the job itself failed or if any individual task ended in error.
sub exec_backup {
    my ($self, $rpcenv, $authuser) = @_;

    my $opts = $self->{opts};

    debugmsg('info', "starting new backup job: $self->{cmdline}", undef, 1);

    # report guests that were filtered out earlier (e.g. residing on other nodes)
    if (scalar(@{$self->{skiplist}})) {
        my $skip_string = join(', ', sort { $a <=> $b } @{$self->{skiplist}});
        debugmsg('info', "skip external VMs: $skip_string");
    }

    my $tasklist = [];
    # map guest type (e.g. 'qemu', 'lxc') to the first plugin handling it
    my $vzdump_plugins = {};
    foreach my $plugin (@{$self->{plugins}}) {
        my $type = $plugin->type();
        next if exists $vzdump_plugins->{$type};
        $vzdump_plugins->{$type} = $plugin;
    }

    my $vmlist = PVE::Cluster::get_vmlist();
    my $vmids = [ sort { $a <=> $b } @{$opts->{vmids}} ];
    foreach my $vmid (@{$vmids}) {
        my $plugin;
        if (defined($vmlist->{ids}->{$vmid})) {
            my $guest_type = $vmlist->{ids}->{$vmid}->{type};
            $plugin = $vzdump_plugins->{$guest_type};
            # skip guests the user may not back up; with --all this is silent
            next if !$rpcenv->check($authuser, "/vms/$vmid", [ 'VM.Backup' ], $opts->{all});
        }
        # NOTE(review): a vmid missing from the cluster list still gets a task,
        # but with an undefined plugin — presumably exec_backup_task() reports
        # that as an error; confirm against its implementation.
        push @$tasklist, {
            mode => $opts->{mode},
            plugin => $plugin,
            state => 'todo',
            vmid => $vmid,
        };
    }

    # Use in-memory files for the outer hook logs to pass them to sendmail.
    my $job_start_log = '';
    my $job_end_log = '';
    open my $job_start_fd, '>', \$job_start_log;
    open my $job_end_fd, '>', \$job_end_log;

    my $starttime = time();
    my $errcount = 0;
    eval {
        $self->run_hook_script('job-start', undef, $job_start_fd);

        foreach my $task (@$tasklist) {
            # exec_backup_task() records the outcome in $task->{state}
            $self->exec_backup_task($task);
            $errcount += 1 if $task->{state} ne 'ok';
        }

        $self->run_hook_script('job-end', undef, $job_end_fd);
    };
    my $err = $@;

    # on a job-level failure the abort hook runs instead of (or after) job-end
    $self->run_hook_script('job-abort', undef, $job_end_fd) if $err;

    if ($err) {
        debugmsg('err', "Backup job failed - $err", undef, 1);
    } else {
        if ($errcount) {
            debugmsg('info', "Backup job finished with errors", undef, 1);
        } else {
            debugmsg('info', "Backup job finished successfully", undef, 1);
        }
    }

    close $job_start_fd;
    close $job_end_fd;

    my $totaltime = time() - $starttime;

    # mail failures must not mask the job result, so only warn about them
    eval { $self->sendmail($tasklist, $totaltime, undef, $job_start_log, $job_end_log); };
    debugmsg('err', $@) if $@;

    die $err if $err;

    die "job errors\n" if $errcount;

    # only reached on full success; a stale pidfile would block new jobs
    unlink $pidfile;
}
2011-10-25 09:08:05 +02:00
2011-10-28 08:00:34 +02:00
# Return true if $key names a known vzdump configuration option
# (i.e. it appears in the shared config description from VZDump::Common).
sub option_exists {
    my ($key) = @_;
    return defined($confdesc->{$key});
}
2021-02-15 13:25:00 +01:00
# NOTE it might make sense to merge this and verify_vzdump_parameters(), but one
# needs to adapt command_line() in guest-common's PVE/VZDump/Common.pm and detect
# a second parsing attempt, because verify_vzdump_parameters() is called twice
# during the update_job API call.
#
# Normalize the 'exclude-path' and 'mailto' parameters in place:
# both arrive as flat strings and are converted to array references.
sub parse_mailto_exclude_path {
    my ($param) = @_;

    # exclude-path entries are passed as a single NUL-separated string
    if (defined($param->{'exclude-path'})) {
        $param->{'exclude-path'} = [ split(/\0/, $param->{'exclude-path'} || '') ];
    }

    # mailto is a generic list (comma/semicolon/space separated)
    if (defined($param->{mailto})) {
        $param->{mailto} = [ PVE::Tools::split_list(extract_param($param, 'mailto')) ];
    }

    return;
}
2011-10-31 08:47:18 +01:00
# Validate a vzdump parameter hash, raising parameter exceptions on
# conflicting options. Also normalizes the legacy 'maxfiles' option into
# 'prune-backups' and derives 'all' from a bare 'exclude' list.
# When $check_missing is true, additionally require some guest selection
# ('vmid', 'all', 'stop' or 'pool').
sub verify_vzdump_parameters {
    my ($param, $check_missing) = @_;

    # 'vmid' is mutually exclusive with each of these selection options
    for my $conflicting (qw(all exclude pool)) {
        raise_param_exc({ $conflicting => "option conflicts with option 'vmid'" })
            if $param->{$conflicting} && $param->{vmid};
    }

    raise_param_exc({ 'prune-backups' => "option conflicts with option 'maxfiles'" })
        if defined($param->{'prune-backups'}) && defined($param->{maxfiles});

    $parse_prune_backups_maxfiles->($param, 'CLI parameters');

    # a bare exclude list implies backing up everything else
    $param->{all} = 1 if defined($param->{exclude}) && !$param->{pool};

    warn "option 'size' is deprecated and will be removed in a future " .
        "release, please update your script/configuration!\n"
        if defined($param->{size});

    return if !$check_missing;

    raise_param_exc({ vmid => "property is missing" })
        if !($param->{all} || $param->{stop} || $param->{pool}) && !$param->{vmid};
}
2015-01-20 09:26:31 +01:00
# Terminate a currently running vzdump job, identified via the UPID stored
# in the pidfile. Sends SIGTERM and waits up to 15 seconds for the process
# to go away; dies if it is still running afterwards.
sub stop_running_backups {
    my ($self) = @_;

    my $upid = PVE::Tools::file_read_firstline($pidfile);
    return if !$upid;

    my $task = PVE::Tools::upid_decode($upid);
    my ($pid, $pstart) = ($task->{pid}, $task->{pstart});

    # nothing to do unless the recorded pid/starttime still match a live process
    return if !PVE::ProcFSTools::check_process_running($pid, $pstart);
    return if PVE::ProcFSTools::read_proc_starttime($pid) != $pstart;

    kill(15, $pid);

    # wait max 15 seconds to shut down (else, do nothing for now)
    my $remaining = 15;
    while ($remaining > 0) {
        last if !PVE::ProcFSTools::check_process_running($pid, $pstart);
        sleep(1);
        $remaining--;
    }
    die "stopping backup process $pid failed\n" if $remaining == 0;
}
2020-06-08 15:00:34 +02:00
# Resolve which guests a backup job covers and group them by cluster node.
# Selection precedence: 'pool' members, then an explicit 'vmid' list, then
# 'all' (minus the 'exclude' list). Without any selection an empty hash is
# returned. Guests unknown to the cluster vmlist are grouped under the ''
# key; a 'node' restriction on the job filters out guests on other nodes.
# Returns: hashref mapping node name => arrayref of vmids.
sub get_included_guests {
    my ($job) = @_;

    my $guests_by_node = {};
    my $cluster_vms = PVE::Cluster::get_vmlist();

    my $selected = [];
    if ($job->{pool}) {
        $selected = PVE::API2Tools::get_resource_pool_guest_members($job->{pool});
    } elsif ($job->{vmid}) {
        $selected = [ split_list($job->{vmid}) ];
    } elsif ($job->{all}) {
        # back up everything except the explicitly excluded guests
        my $excluded = { map { $_ => 1 } @{ check_vmids(split_list($job->{exclude})) } };
        $selected = [ grep { !$excluded->{$_} } keys %{ $cluster_vms->{ids} } ];
    } else {
        return $guests_by_node; # no selection criteria given at all
    }

    for my $vmid (@{ check_vmids(@$selected) }) {
        my $entry = $cluster_vms->{ids}->{$vmid};
        if (defined($entry)) {
            my $node = $entry->{node};
            next if defined($job->{node}) && $job->{node} ne $node;
            push @{ $guests_by_node->{$node} }, $vmid;
        } else {
            # unknown guests are collected under the empty node name
            push @{ $guests_by_node->{''} }, $vmid;
        }
    }

    return $guests_by_node;
}
2011-10-12 08:25:18 +02:00
1 ;