package PVE::VZDump::QemuServer;

use strict;
use warnings;

use File::Basename;
use File::Path;
use IO::File;
use IPC::Open3;
use JSON;
use POSIX qw(EINTR EAGAIN);

use PVE::Cluster qw(cfs_read_file);
use PVE::INotify;
use PVE::IPCC;
use PVE::JSONSchema;
use PVE::PBSClient;
use PVE::RESTEnvironment qw(log_warn);
use PVE::QMPClient;
use PVE::Storage::Plugin;
use PVE::Storage::PBSPlugin;
use PVE::Storage;
use PVE::Tools;
use PVE::VZDump;
use PVE::Format qw(render_duration render_bytes);

use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::QemuServer::Machine;
use PVE::QemuServer::Monitor qw(mon_cmd);

use base qw(PVE::VZDump::Plugin);
sub new {
    my ($class, $vzdump) = @_;

    PVE::VZDump::check_bin('qm');

    my $self = bless { vzdump => $vzdump }, $class;

    $self->{vmlist} = PVE::QemuServer::vzlist();
    $self->{storecfg} = PVE::Storage::config();

    return $self;
}
sub type {
    return 'qemu';
}
sub vmlist {
    my ($self) = @_;
    return [ keys %{$self->{vmlist}} ];
}
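
# Descriptive note (not in the original source): prepare() scans the VM config
# for volumes to back up, logs included/excluded disks, activates the backing
# storage volumes, and fills $task->{disks} with one diskinfo entry
# (path, volid, storeid, format, virtdev, 'drive-<name>' device) per disk.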
sub prepare {
    my ($self, $task, $vmid, $mode) = @_;

    $task->{disks} = [];

    my $conf = $self->{vmlist}->{$vmid} = PVE::QemuConfig->load_config($vmid);

    $self->loginfo("VM Name: $conf->{name}")
        if defined($conf->{name});

    $self->{vm_was_running} = 1;
    $self->{vm_was_paused} = 0;
    if (!PVE::QemuServer::check_running($vmid)) {
        $self->{vm_was_running} = 0;
    } elsif (PVE::QemuServer::vm_is_paused($vmid)) {
        $self->{vm_was_paused} = 1;
    }

    $task->{hostname} = $conf->{name};

    my $hostname = PVE::INotify::nodename();

    my $vollist = [];
    my $drivehash = {};
    my $backup_volumes = PVE::QemuConfig->get_backup_volumes($conf);

    foreach my $volume (@{$backup_volumes}) {
        my $name = $volume->{key};
        my $volume_config = $volume->{volume_config};
        my $volid = $volume_config->{file};

        if (!$volume->{included}) {
            $self->loginfo("exclude disk '$name' '$volid' ($volume->{reason})");
            next;
        } elsif ($self->{vm_was_running} && $volume_config->{iothread} &&
            !PVE::QemuServer::Machine::runs_at_least_qemu_version($vmid, 4, 0, 1)) {
            die "disk '$name' '$volid' (iothread=on) can't use backup feature with running QEMU " .
                "version < 4.0.1! Either set backup=no for this drive or upgrade QEMU and restart VM\n";
        } else {
            my $log = "include disk '$name' '$volid'";
            if (defined(my $size = $volume_config->{size})) {
                my $readable_size = PVE::JSONSchema::format_size($size);
                $log .= " $readable_size";
            }
            $self->loginfo($log);
        }

        my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
        push @$vollist, $volid if $storeid;
        $drivehash->{$name} = $volume->{volume_config};
    }

    PVE::Storage::activate_volumes($self->{storecfg}, $vollist);

    foreach my $ds (sort keys %$drivehash) {
        my $drive = $drivehash->{$ds};

        my $volid = $drive->{file};
        my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

        my $path = $volid;
        if ($storeid) {
            $path = PVE::Storage::path($self->{storecfg}, $volid);
        }
        next if !$path;

        my ($size, $format);
        if ($storeid) {
            # The call in list context can be expensive for certain plugins like RBD, just get size
            $size = eval { PVE::Storage::volume_size_info($self->{storecfg}, $volid, 5) };
            die "cannot determine size of volume '$volid' - $@\n" if $@;

            my $scfg = PVE::Storage::storage_config($self->{storecfg}, $storeid);
            $format = PVE::QemuServer::qemu_img_format($scfg, $volname);
        } else {
            ($size, $format) = eval {
                PVE::Storage::volume_size_info($self->{storecfg}, $volid, 5);
            };
            die "cannot determine size and format of volume '$volid' - $@\n" if $@;
        }

        my $diskinfo = {
            path => $path,
            volid => $volid,
            storeid => $storeid,
            format => $format,
            virtdev => $ds,
            qmdevice => "drive-$ds",
        };
        if ($ds eq 'tpmstate0') {
            # TPM drive only exists for backup, which is reflected in the name
            $diskinfo->{qmdevice} = 'drive-tpmstate0-backup';
            $task->{tpmpath} = $path;
        }

        if (-b $path) {
            $diskinfo->{type} = 'block';
        } else {
            $diskinfo->{type} = 'file';
        }

        push @{$task->{disks}}, $diskinfo;
    }
}
sub vm_status {
    my ($self, $vmid) = @_;

    my $running = PVE::QemuServer::check_running($vmid) ? 1 : 0;

    return wantarray ? ($running, $running ? 'running' : 'stopped') : $running;
}
sub lock_vm {
    my ($self, $vmid) = @_;

    PVE::QemuConfig->set_lock($vmid, 'backup');
}

sub unlock_vm {
    my ($self, $vmid) = @_;

    PVE::QemuConfig->remove_lock($vmid, 'backup');
}

sub stop_vm {
    my ($self, $task, $vmid) = @_;

    my $opts = $self->{vzdump}->{opts};

    my $wait = $opts->{stopwait} * 60;
    # send shutdown and wait
    $self->cmd("qm shutdown $vmid --skiplock --keepActive --timeout $wait");
}

sub start_vm {
    my ($self, $task, $vmid) = @_;

    $self->cmd("qm start $vmid --skiplock");
}

sub suspend_vm {
    my ($self, $task, $vmid) = @_;

    return if $self->{vm_was_paused};

    $self->cmd("qm suspend $vmid --skiplock");
}

sub resume_vm {
    my ($self, $task, $vmid) = @_;

    return if $self->{vm_was_paused};

    $self->cmd("qm resume $vmid --skiplock");
}
sub assemble {
    my ($self, $task, $vmid) = @_;

    my $conffile = PVE::QemuConfig->config_file($vmid);

    my $outfile = "$task->{tmpdir}/qemu-server.conf";
    my $firewall_src = "/etc/pve/firewall/$vmid.fw";
    my $firewall_dest = "$task->{tmpdir}/qemu-server.fw";

    my $outfd = IO::File->new(">$outfile") ||
        die "unable to open '$outfile'";
    my $conffd = IO::File->new($conffile, 'r') ||
        die "unable to open '$conffile'";

    my $found_snapshot;
    my $found_pending;
    my $found_cloudinit;
    while (defined(my $line = <$conffd>)) {
        next if $line =~ m/^\#vzdump\#/; # just to be sure
        next if $line =~ m/^\#qmdump\#/; # just to be sure
        if ($line =~ m/^\[(.*)\]\s*$/) {
            if ($1 =~ m/PENDING/i) {
                $found_pending = 1;
            } elsif ($1 =~ m/special:cloudinit/) {
                $found_cloudinit = 1;
            } else {
                $found_snapshot = 1;
            }
        }
        # skip all snapshots, pending changes and cloudinit config data
        next if $found_snapshot || $found_pending || $found_cloudinit;

        if ($line =~ m/^unused\d+:\s*(\S+)\s*/) {
            $self->loginfo("skip unused drive '$1' (not included into backup)");
            next;
        }
        next if $line =~ m/^lock:/ || $line =~ m/^parent:/;

        print $outfd $line;
    }

    foreach my $di (@{$task->{disks}}) {
        if ($di->{type} eq 'block' || $di->{type} eq 'file') {
            my $storeid = $di->{storeid} || '';
            my $format = $di->{format} || '';
            print $outfd "#qmdump#map:$di->{virtdev}:$di->{qmdevice}:$storeid:$format:\n";
        } else {
            die "internal error";
        }
    }

    if ($found_snapshot) {
        $self->loginfo("snapshots found (not included into backup)");
    }
    if ($found_pending) {
        $self->loginfo("pending configuration changes found (not included into backup)");
    }

    PVE::Tools::file_copy($firewall_src, $firewall_dest) if -f $firewall_src;
}
sub archive {
    my ($self, $task, $vmid, $filename, $comp) = @_;

    my $opts = $self->{vzdump}->{opts};
    my $scfg = $opts->{scfg};

    if ($self->{vzdump}->{opts}->{pbs}) {
        $self->archive_pbs($task, $vmid);
    } else {
        $self->archive_vma($task, $vmid, $filename, $comp);
    }
}
my $bitmap_action_to_human = sub {
    my ($self, $info) = @_;

    my $action = $info->{action};

    if ($action eq "not-used") {
        return "disabled (no support)";
    } elsif ($action eq "not-used-removed") {
        return "disabled (old bitmap cleared)";
    } elsif ($action eq "new") {
        return "created new";
    } elsif ($action eq "used") {
        if ($info->{dirty} == 0) {
            return "OK (drive clean)";
        } else {
            my $size = render_bytes($info->{size}, 1);
            my $dirty = render_bytes($info->{dirty}, 1);
            return "OK ($dirty of $size dirty)";
        }
    } elsif ($action eq "invalid") {
        return "existing bitmap was invalid and has been cleared";
    } else {
        return "unknown";
    }
};
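
# Descriptive note (not in the original source): this helper logs the
# dirty-bitmap status per drive once (if the running QEMU supports
# 'query-bitmap-info'), then polls 'query-backup' via QMP about once per
# second until the job leaves the 'active' state, logging progress lines and
# finally returning the total and reused byte counts.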
my $query_backup_status_loop = sub {
    my ($self, $vmid, $job_uuid, $qemu_support) = @_;

    my $starttime = time();
    my $last_time = $starttime;
    my ($last_percent, $last_total, $last_target, $last_zero, $last_transferred) = (-1, 0, 0, 0, 0);
    my ($transferred, $reused);

    my $get_mbps = sub {
        my ($mb, $delta) = @_;
        return "0 B/s" if $mb <= 0;
        my $bw = int(($mb / $delta));
        return render_bytes($bw, 1) . "/s";
    };

    my $target = 0;
    my $last_reused = 0;
    my $has_query_bitmap = $qemu_support && $qemu_support->{'query-bitmap-info'};
    my $is_template = PVE::QemuConfig->is_template($self->{vmlist}->{$vmid});
    if ($has_query_bitmap) {
        my $total = 0;
        my $bitmap_info = mon_cmd($vmid, 'query-pbs-bitmap-info');
        for my $info (sort { $a->{drive} cmp $b->{drive} } @$bitmap_info) {
            if (!$is_template) {
                my $text = $bitmap_action_to_human->($self, $info);
                my $drive = $info->{drive};
                $drive =~ s/^drive-//; # for consistency
                $self->loginfo("$drive: dirty-bitmap status: $text");
            }
            $target += $info->{dirty};
            $total += $info->{size};
            $last_reused += $info->{size} - $info->{dirty};
        }
        if ($target < $total) {
            my $total_h = render_bytes($total, 1);
            my $target_h = render_bytes($target, 1);
            $self->loginfo("using fast incremental mode (dirty-bitmap), $target_h dirty of $total_h total");
        }
    }

    my $last_finishing = 0;
    while (1) {
        my $status = mon_cmd($vmid, 'query-backup');

        my $total = $status->{total} || 0;
        my $dirty = $status->{dirty};
        $target = (defined($dirty) && $dirty < $total) ? $dirty : $total if !$has_query_bitmap;
        $transferred = $status->{transferred} || 0;
        $reused = $status->{reused};
        my $percent = $target ? int(($transferred * 100) / $target) : 100;
        my $zero = $status->{'zero-bytes'} || 0;

        die "got unexpected uuid\n" if !$status->{uuid} || ($status->{uuid} ne $job_uuid);

        my $ctime = time();
        my $duration = $ctime - $starttime;

        my $rbytes = $transferred - $last_transferred;
        my $wbytes;
        if ($reused) {
            # reused includes zero bytes for PBS
            $wbytes = $rbytes - ($reused - $last_reused);
        } else {
            $wbytes = $rbytes - ($zero - $last_zero);
        }

        my $timediff = ($ctime - $last_time) || 1; # fixme
        my $mbps_read = $get_mbps->($rbytes, $timediff);
        my $mbps_write = $get_mbps->($wbytes, $timediff);

        my $target_h = render_bytes($target, 1);
        my $transferred_h = render_bytes($transferred, 1);

        my $statusline = sprintf("%3d%% ($transferred_h of $target_h) in %s"
            . ", read: $mbps_read, write: $mbps_write", $percent, render_duration($duration));

        my $res = $status->{status} || 'unknown';
        if ($res ne 'active') {
            if ($last_percent < 100) {
                $self->loginfo($statusline);
            }
            if ($res ne 'done') {
                die (($status->{errmsg} || "unknown error") . "\n") if $res eq 'error';
                die "got unexpected status '$res'\n";
            }
            $last_target = $target if $target;
            $last_total = $total if $total;
            $last_zero = $zero if $zero;
            $last_transferred = $transferred if $transferred;
            last;
        }
        if ($percent != $last_percent && ($timediff > 2)) {
            $self->loginfo($statusline);
            $last_percent = $percent;
            $last_target = $target if $target;
            $last_total = $total if $total;
            $last_zero = $zero if $zero;
            $last_transferred = $transferred if $transferred;
            $last_time = $ctime;
            $last_reused = $reused;
            if (!$last_finishing && $status->{finishing}) {
                $self->loginfo("Waiting for server to finish backup validation...");
            }
            $last_finishing = $status->{finishing};
        }
        sleep(1);
    }
    my $duration = time() - $starttime;

    if ($last_zero) {
        my $zero_per = $last_target ? int(($last_zero * 100) / $last_target) : 0;
        my $zero_h = render_bytes($last_zero);
        $self->loginfo("backup is sparse: $zero_h (${zero_per}%) total zero data");
    }
    if ($reused) {
        my $reused_h = render_bytes($reused);
        my $reuse_per = int($reused * 100 / $last_total);
        $self->loginfo("backup was done incrementally, reused $reused_h (${reuse_per}%)");
    }
    if ($transferred) {
        my $transferred_h = render_bytes($transferred);
        if ($duration) {
            my $mbps = $get_mbps->($transferred, $duration);
            $self->loginfo("transferred $transferred_h in $duration seconds ($mbps)");
        } else {
            $self->loginfo("transferred $transferred_h in <1 seconds");
        }
    }

    return {
        total => $last_total,
        reused => $reused,
    };
};
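
# TPM state handling (bug #3075): the 'tpmstate0' volume is owned by swtpm and
# not registered with QEMU. For backups it is temporarily attached to QEMU as
# the read-only drive 'drive-tpmstate0-backup', so the backup job can include
# it as a block device while staying consistent with the rest of the disk
# state. The diverging '-backup' suffix keeps it from ever being treated as a
# regular drive should it be left over after a backup.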
my $attach_tpmstate_drive = sub {
    my ($self, $task, $vmid) = @_;

    return if !$task->{tpmpath};

    # unconditionally try to remove the tpmstate-named drive - it only exists
    # for backing up, and avoids errors if left over from some previous event
    eval { PVE::QemuServer::qemu_drivedel($vmid, "tpmstate0-backup"); };

    $self->loginfo('attaching TPM drive to QEMU for backup');

    my $drive = "file=$task->{tpmpath},if=none,read-only=on,id=drive-tpmstate0-backup";
    $drive =~ s/\\/\\\\/g;
    my $ret = PVE::QemuServer::Monitor::hmp_cmd($vmid, "drive_add auto \"$drive\"");
    die "attaching TPM drive failed - $ret\n" if $ret !~ m/OK/s;
};

my $detach_tpmstate_drive = sub {
    my ($task, $vmid) = @_;

    return if !$task->{tpmpath} || !PVE::QemuServer::check_running($vmid);

    eval { PVE::QemuServer::qemu_drivedel($vmid, "tpmstate0-backup"); };
};
my sub add_backup_performance_options {
    my ($qmp_param, $perf, $qemu_support) = @_;

    return if !$perf || scalar(keys $perf->%*) == 0;

    if (!$qemu_support) {
        my $settings_string = join(', ', sort keys $perf->%*);
        log_warn("ignoring setting(s): $settings_string - issue checking if supported");
        return;
    }

    if (defined($perf->{'max-workers'})) {
        if ($qemu_support->{'backup-max-workers'}) {
            $qmp_param->{'max-workers'} = int($perf->{'max-workers'});
        } else {
            log_warn("ignoring 'max-workers' setting - not supported by running QEMU");
        }
    }
}
sub archive_pbs {
    my ($self, $task, $vmid) = @_;

    my $conffile = "$task->{tmpdir}/qemu-server.conf";
    my $firewall = "$task->{tmpdir}/qemu-server.fw";

    my $opts = $self->{vzdump}->{opts};
    my $scfg = $opts->{scfg};

    my $starttime = time();

    my $fingerprint = $scfg->{fingerprint};
    my $repo = PVE::PBSClient::get_repository($scfg);
    my $password = PVE::Storage::PBSPlugin::pbs_get_password($scfg, $opts->{storage});
    my $keyfile = PVE::Storage::PBSPlugin::pbs_encryption_key_file_name($scfg, $opts->{storage});
    my $master_keyfile = PVE::Storage::PBSPlugin::pbs_master_pubkey_file_name($scfg, $opts->{storage});

    my $diskcount = scalar(@{$task->{disks}});
    # proxmox-backup-client can only handle raw files and block devs
    # only use it (directly) for disk-less VMs
    if (!$diskcount) {
        $self->loginfo("backup contains no disks");

        local $ENV{PBS_PASSWORD} = $password;
        local $ENV{PBS_FINGERPRINT} = $fingerprint if defined($fingerprint);
        my $cmd = [
            '/usr/bin/proxmox-backup-client',
            'backup',
            '--repository', $repo,
            '--backup-type', 'vm',
            '--backup-id', "$vmid",
            '--backup-time', $task->{backup_time},
        ];
        if (defined(my $ns = $scfg->{namespace})) {
            push @$cmd, '--ns', $ns;
        }

        if (-e $keyfile) {
            $self->loginfo("enabling encryption");
            push @$cmd, '--keyfile', $keyfile;
            if (defined($master_keyfile)) {
                if (-e $master_keyfile) {
                    $self->loginfo("enabling master key feature");
                    push @$cmd, '--master-pubkey-file', $master_keyfile;
                } elsif ($scfg->{'master-pubkey'}) {
                    die "master public key configured but no key file found\n";
                }
            }
        } else {
            my $encryption_fp = $scfg->{'encryption-key'};
            die "encryption configured ('$encryption_fp') but no encryption key file found!\n"
                if $encryption_fp;
            $self->loginfo("WARNING: backup target is configured with master key, but this backup is not encrypted - master key settings will be ignored!")
                if defined($master_keyfile) && -e $master_keyfile;
        }

        push @$cmd, "qemu-server.conf:$conffile";
        push @$cmd, "fw.conf:$firewall" if -e $firewall;

        $self->loginfo("starting template backup");
        $self->loginfo(join(' ', @$cmd));

        $self->cmd($cmd);

        return;
    }

    # get list early so we die on unknown drive types before doing anything
    my $devlist = _get_task_devlist($task);

    $self->enforce_vm_running_for_backup($vmid);
    $self->{qmeventd_fh} = PVE::QemuServer::register_qmeventd_handle($vmid);

    my $backup_job_uuid;
    eval {
        $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = $SIG{PIPE} = sub {
            die "interrupted by signal\n";
        };

        my $qemu_support = eval { mon_cmd($vmid, "query-proxmox-support") };
        my $err = $@;
        if (!$qemu_support || $err) {
            die "query-proxmox-support returned empty value\n" if !$err;
            if ($err =~ m/The command query-proxmox-support has not been found/) {
                die "PBS backups are not supported by the running QEMU version. Please make "
                    . "sure you've installed the latest version and the VM has been restarted.\n";
            } else {
                die "QMP command query-proxmox-support failed - $err\n";
            }
        }

        if (!defined($qemu_support->{"pbs-masterkey"}) && -e $master_keyfile) {
            $self->loginfo("WARNING: backup target is configured with master key, but running QEMU version does not support master keys.");
            $self->loginfo("Please make sure you've installed the latest version and the VM has been restarted to use master key feature.");
            $master_keyfile = undef; # skip rest of master key handling below
        }
        $attach_tpmstate_drive->($self, $task, $vmid);

        my $fs_frozen = $self->qga_fs_freeze($task, $vmid);

        my $params = {
            format => "pbs",
            'backup-file' => $repo,
            'backup-id' => "$vmid",
            'backup-time' => $task->{backup_time},
            password => $password,
            devlist => $devlist,
            'config-file' => $conffile,
        };
        if (defined(my $ns = $scfg->{namespace})) {
            $params->{'backup-ns'} = $ns;
        }

        $params->{speed} = $opts->{bwlimit}*1024 if $opts->{bwlimit};
        add_backup_performance_options($params, $opts->{performance}, $qemu_support);

        $params->{fingerprint} = $fingerprint if defined($fingerprint);
        $params->{'firewall-file'} = $firewall if -e $firewall;

        if (-e $keyfile) {
            $self->loginfo("enabling encryption");
            $params->{keyfile} = $keyfile;
            $params->{encrypt} = JSON::true;
            if (defined($master_keyfile)) {
                if (-e $master_keyfile) {
                    $self->loginfo("enabling master key feature");
                    $params->{"master-keyfile"} = $master_keyfile;
                } elsif ($scfg->{'master-pubkey'}) {
                    die "master public key configured but no key file found\n";
                }
            }
        } else {
            my $encryption_fp = $scfg->{'encryption-key'};
            die "encryption configured ('$encryption_fp') but no encryption key file found!\n"
                if $encryption_fp;
            $self->loginfo("WARNING: backup target is configured with master key, but this backup is not encrypted - master key settings will be ignored!")
                if defined($master_keyfile) && -e $master_keyfile;
            $params->{encrypt} = JSON::false;
        }

        my $is_template = PVE::QemuConfig->is_template($self->{vmlist}->{$vmid});
        $params->{'use-dirty-bitmap'} = JSON::true
            if $qemu_support->{'pbs-dirty-bitmap'} && !$is_template;

        $params->{timeout} = 125; # give some time to connect to the backup server

        my $res = eval { mon_cmd($vmid, "backup", %$params) };
        my $qmperr = $@;
        $backup_job_uuid = $res->{UUID} if $res;

        if ($fs_frozen) {
            $self->qga_fs_thaw($vmid);
        }

        die $qmperr if $qmperr;
        die "got no uuid for backup task\n" if !defined($backup_job_uuid);

        $self->loginfo("started backup task '$backup_job_uuid'");

        $self->resume_vm_after_job_start($task, $vmid);

        my $stat = $query_backup_status_loop->($self, $vmid, $backup_job_uuid, $qemu_support);
        $task->{size} = $stat->{total};
    };
    my $err = $@;
    if ($err) {
        $self->logerr($err);
        $self->mon_backup_cancel($vmid);
        $self->resume_vm_after_job_start($task, $vmid);
    }

    $self->restore_vm_power_state($vmid);

    die $err if $err;
}
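
# Descriptive note (not in the original source): the helper below forks a
# compressor child whose STDIN is connected to a new pipe and whose STDOUT is
# redirected to the given output file descriptor before exec'ing the
# compressor command. It returns the child pid and the pipe's write end,
# which then replaces the original output fd for the VMA stream.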
my $fork_compressor_pipe = sub {
    my ($self, $comp, $outfileno) = @_;

    my @pipefd = POSIX::pipe();
    my $cpid = fork();
    die "unable to fork worker - $!" if !defined($cpid) || $cpid < 0;
    if ($cpid == 0) {
        eval {
            POSIX::close($pipefd[1]);
            # redirect STDIN
            my $fd = fileno(STDIN);
            close STDIN;
            POSIX::close(0) if $fd != 0;
            die "unable to redirect STDIN - $!"
                if !open(STDIN, "<&", $pipefd[0]);

            # redirect STDOUT
            $fd = fileno(STDOUT);
            close STDOUT;
            POSIX::close(1) if $fd != 1;
            die "unable to redirect STDOUT - $!"
                if !open(STDOUT, ">&", $outfileno);

            exec($comp);
            die "fork compressor '$comp' failed\n";
        };
        if (my $err = $@) {
            $self->logerr($err);
            POSIX::_exit(1);
        }
        POSIX::_exit(0);
        kill(-9, $$);
    } else {
        POSIX::close($pipefd[0]);
        $outfileno = $pipefd[1];
    }

    return ($cpid, $outfileno);
};
sub archive_vma {
    my ($self, $task, $vmid, $filename, $comp) = @_;

    my $conffile = "$task->{tmpdir}/qemu-server.conf";
    my $firewall = "$task->{tmpdir}/qemu-server.fw";

    my $opts = $self->{vzdump}->{opts};

    my $starttime = time();

    my $speed = 0;
    if ($opts->{bwlimit}) {
        $speed = $opts->{bwlimit}*1024;
    }

    my $diskcount = scalar(@{$task->{disks}});
    if (PVE::QemuConfig->is_template($self->{vmlist}->{$vmid}) || !$diskcount) {
        my @pathlist;
        foreach my $di (@{$task->{disks}}) {
            if ($di->{type} eq 'block' || $di->{type} eq 'file') {
                push @pathlist, "$di->{qmdevice}=$di->{path}";
            } else {
                die "implement me";
            }
        }

        if (!$diskcount) {
            $self->loginfo("backup contains no disks");
        }

        my $outcmd;
        if ($comp) {
            $outcmd = "exec:$comp";
        } else {
            $outcmd = "exec:cat";
        }

        $outcmd .= " > $filename" if !$opts->{stdout};

        my $cmd = ['/usr/bin/vma', 'create', '-v', '-c', $conffile];
        push @$cmd, '-c', $firewall if -e $firewall;
        push @$cmd, $outcmd, @pathlist;

        $self->loginfo("starting template backup");
        $self->loginfo(join(' ', @$cmd));

        if ($opts->{stdout}) {
            $self->cmd($cmd, output => ">&" . fileno($opts->{stdout}));
        } else {
            $self->cmd($cmd);
        }

        return;
    }

    my $devlist = _get_task_devlist($task);

    $self->enforce_vm_running_for_backup($vmid);
    $self->{qmeventd_fh} = PVE::QemuServer::register_qmeventd_handle($vmid);

    my $cpid;
    my $backup_job_uuid;

    eval {
        $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = $SIG{PIPE} = sub {
            die "interrupted by signal\n";
        };

        # Currently, failing to determine Proxmox support is not critical here, because it's only
        # used for performance settings like 'max-workers'.
        my $qemu_support = eval { mon_cmd($vmid, "query-proxmox-support") };
        log_warn($@) if $@;
        $attach_tpmstate_drive->($self, $task, $vmid);

        my $outfh;
        if ($opts->{stdout}) {
            $outfh = $opts->{stdout};
        } else {
            $outfh = IO::File->new($filename, "w") ||
                die "unable to open file '$filename' - $!\n";
        }
        my $outfileno = fileno($outfh);

        if ($comp) {
            ($cpid, $outfileno) = $fork_compressor_pipe->($self, $comp, $outfileno);
        }

        my $qmpclient = PVE::QMPClient->new();
        my $backup_cb = sub {
            my ($vmid, $resp) = @_;
            $backup_job_uuid = $resp->{return}->{UUID};
        };
        my $add_fd_cb = sub {
            my ($vmid, $resp) = @_;

            my $params = {
                'backup-file' => "/dev/fdname/backup",
                speed => $speed,
                'config-file' => $conffile,
                devlist => $devlist,
            };
            $params->{'firewall-file'} = $firewall if -e $firewall;
            add_backup_performance_options($params, $opts->{performance}, $qemu_support);

            $qmpclient->queue_cmd($vmid, $backup_cb, 'backup', %$params);
        };

        $qmpclient->queue_cmd($vmid, $add_fd_cb, 'getfd', fd => $outfileno, fdname => "backup");

        my $fs_frozen = $self->qga_fs_freeze($task, $vmid);

        eval { $qmpclient->queue_execute(30) };
        my $qmperr = $@;

        if ($fs_frozen) {
            $self->qga_fs_thaw($vmid);
        }

        die $qmperr if $qmperr;
        die $qmpclient->{errors}->{$vmid} if $qmpclient->{errors}->{$vmid};

        if ($cpid) {
            POSIX::close($outfileno) == 0 ||
                die "close output file handle failed\n";
        }

        die "got no uuid for backup task\n" if !defined($backup_job_uuid);

        $self->loginfo("started backup task '$backup_job_uuid'");

        $self->resume_vm_after_job_start($task, $vmid);

        $query_backup_status_loop->($self, $vmid, $backup_job_uuid);
    };
    my $err = $@;
    if ($err) {
        $self->logerr($err);
        $self->mon_backup_cancel($vmid);
        $self->resume_vm_after_job_start($task, $vmid);
    }

    $self->restore_vm_power_state($vmid);

    if ($err) {
        if ($cpid) {
            kill(9, $cpid);
            waitpid($cpid, 0);
        }
        die $err;
    }

    if ($cpid && (waitpid($cpid, 0) > 0)) {
        my $stat = $?;
        my $ec = $stat >> 8;
        my $signal = $stat & 127;
        if ($ec || $signal) {
            die "$comp failed - wrong exit status $ec" .
                ($signal ? " (signal $signal)\n" : "\n");
        }
    }
}
sub _get_task_devlist {
    my ($task) = @_;

    my $devlist = '';
    foreach my $di (@{$task->{disks}}) {
        if ($di->{type} eq 'block' || $di->{type} eq 'file') {
            $devlist .= ',' if $devlist;
            $devlist .= $di->{qmdevice};
        } else {
            die "implement me (type '$di->{type}')";
        }
    }
    return $devlist;
}
sub qga_fs_freeze {
    my ($self, $task, $vmid) = @_;

    return if !$self->{vmlist}->{$vmid}->{agent} || $task->{mode} eq 'stop' || !$self->{vm_was_running} || $self->{vm_was_paused};

    if (!PVE::QemuServer::qga_check_running($vmid, 1)) {
        $self->loginfo("skipping guest-agent 'fs-freeze', agent configured but not running?");
        return;
    }

    my $freeze = PVE::QemuServer::get_qga_key($self->{vmlist}->{$vmid}, 'freeze-fs-on-backup') // 1;
    if (!$freeze) {
        $self->loginfo("skipping guest-agent 'fs-freeze', disabled in VM options");
        return;
    }

    $self->loginfo("issuing guest-agent 'fs-freeze' command");
    eval { mon_cmd($vmid, "guest-fsfreeze-freeze") };
    $self->logerr($@) if $@;

    return 1; # even on mon command error, ensure we always thaw again
}

# only call if qga_fs_freeze returned 1
sub qga_fs_thaw {
    my ($self, $vmid) = @_;

    $self->loginfo("issuing guest-agent 'fs-thaw' command");
    eval { mon_cmd($vmid, "guest-fsfreeze-thaw") };
    $self->logerr($@) if $@;
}
# we need a running QEMU/KVM process for backup, starts a paused (prelaunch)
# one if VM isn't already running
sub enforce_vm_running_for_backup {
    my ($self, $vmid) = @_;

    if (PVE::QemuServer::check_running($vmid)) {
        $self->{vm_was_running} = 1;
        return;
    }

    eval {
        $self->loginfo("starting kvm to execute backup task");
        # start with skiplock
        my $params = {
            skiplock => 1,
            skiptemplate => 1,
            paused => 1,
        };
        PVE::QemuServer::vm_start($self->{storecfg}, $vmid, $params);
    };
    die $@ if $@;
}
# resume VM again once in a clear state (stop mode backup of running VM)
sub resume_vm_after_job_start {
    my ($self, $task, $vmid) = @_;

    return if !$self->{vm_was_running} || $self->{vm_was_paused};

    if (my $stoptime = $task->{vmstoptime}) {
        my $delay = time() - $task->{vmstoptime};
        $task->{vmstoptime} = undef; # avoid printing 'online after ..' twice
        $self->loginfo("resuming VM again after $delay seconds");
    } else {
        $self->loginfo("resuming VM again");
    }

    mon_cmd($vmid, 'cont', timeout => 45);
}
# stop again if VM was not running before
sub restore_vm_power_state {
    my ($self, $vmid) = @_;

    # we always let VMs keep running
    return if $self->{vm_was_running};

    eval {
        my $resp = mon_cmd($vmid, 'query-status');
        my $status = $resp && $resp->{status} ? $resp->{status} : 'unknown';
        if ($status eq 'prelaunch') {
            $self->loginfo("stopping kvm after backup task");
            PVE::QemuServer::vm_stop($self->{storecfg}, $vmid, 1);
        } else {
            $self->loginfo("kvm status changed after backup ('$status') - keep VM running");
        }
    };
    warn $@ if $@;
}

sub mon_backup_cancel {
    my ($self, $vmid) = @_;

    $self->loginfo("aborting backup job");
    eval { mon_cmd($vmid, 'backup-cancel') };
    $self->logerr($@) if $@;
}
sub snapshot {
    my ($self, $task, $vmid) = @_;

    # nothing to do
}

sub cleanup {
    my ($self, $task, $vmid) = @_;
    $detach_tpmstate_drive->($task, $vmid);

    if ($self->{qmeventd_fh}) {
        close($self->{qmeventd_fh});
    }
}

1;