package PVE::QemuServer;

use strict;
use warnings;

use Cwd 'abs_path';
use Digest::SHA;
use Fcntl ':flock';
use Fcntl;
use File::Basename;
use File::Copy qw(copy);
use File::Path;
use File::stat;
use Getopt::Long;
use IO::Dir;
use IO::File;
use IO::Handle;
use IO::Select;
use IO::Socket::UNIX;
use IPC::Open3;
use JSON;
use MIME::Base64;
use POSIX;
use Storable qw(dclone);
use Time::HiRes qw(gettimeofday);
use URI::Escape;
use UUID;

use PVE::Cluster qw(cfs_register_file cfs_read_file cfs_write_file cfs_lock_file);
use PVE::DataCenterConfig;
use PVE::Exception qw(raise raise_param_exc);
use PVE::GuestHelpers;
use PVE::INotify;
use PVE::JSONSchema qw(get_standard_option);
use PVE::ProcFSTools;
use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::SysFSTools;
use PVE::Systemd;
use PVE::Tools qw(run_command lock_file lock_file_full file_read_firstline file_get_contents dir_glob_foreach get_host_arch $IPV6RE);

use PVE::QMPClient;
use PVE::QemuConfig;
use PVE::QemuServer::Helpers qw(min_version config_aware_timeout);
use PVE::QemuServer::Cloudinit;
use PVE::QemuServer::CPUConfig qw(print_cpu_device get_cpu_options);
use PVE::QemuServer::Drive qw(is_valid_drivename drive_is_cloudinit drive_is_cdrom parse_drive print_drive foreach_drive foreach_volid);
use PVE::QemuServer::Machine;
use PVE::QemuServer::Memory;
use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuServer::PCI qw(print_pci_addr print_pcie_addr print_pcie_root_port);
use PVE::QemuServer::USB qw(parse_usb_device);

my $EDK2_FW_BASE = '/usr/share/pve-edk2-firmware/';
my $OVMF = {
    x86_64 => [
        "$EDK2_FW_BASE/OVMF_CODE.fd",
        "$EDK2_FW_BASE/OVMF_VARS.fd"
    ],
    aarch64 => [
        "$EDK2_FW_BASE/AAVMF_CODE.fd",
        "$EDK2_FW_BASE/AAVMF_VARS.fd"
    ],
};

my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();

# Note about locking: we use flock on the config file to protect
# against concurrent actions.
# Additionally, we have a 'lock' setting in the config file. This
# can be set to 'migrate', 'backup', 'snapshot' or 'rollback'. Most
# actions are not allowed when such a lock is set. But you can ignore
# this kind of lock with the --skiplock flag.

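# Illustrative sketch (not part of this module): callers usually combine both
# locking layers via PVE::QemuConfig before modifying a config, roughly like
# this (assuming a $vmid in scope):
#
#   PVE::QemuConfig->lock_config($vmid, sub {
#       my $conf = PVE::QemuConfig->load_config($vmid);
#       PVE::QemuConfig->check_lock($conf); # honors the 'lock' property
#       # ... modify $conf ...
#       PVE::QemuConfig->write_config($vmid, $conf);
#   });
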
cfs_register_file('/qemu-server/',
                  \&parse_vm_config,
                  \&write_vm_config);

PVE::JSONSchema::register_standard_option('pve-qm-stateuri', {
    description => "Some command save/restore state from this location.",
    type => 'string',
    maxLength => 128,
    optional => 1,
});

PVE::JSONSchema::register_standard_option('pve-qemu-machine', {
    description => "Specifies the Qemu machine type.",
    type => 'string',
    pattern => '(pc|pc(-i440fx)?-\d+(\.\d+)+(\+pve\d+)?(\.pxe)?|q35|pc-q35-\d+(\.\d+)+(\+pve\d+)?(\.pxe)?|virt(?:-\d+(\.\d+)+)?(\+pve\d+)?)',
    maxLength => 40,
    optional => 1,
});

#no warnings 'redefine';

sub cgroups_write {
    my ($controller, $vmid, $option, $value) = @_;

    my $path = "/sys/fs/cgroup/$controller/qemu.slice/$vmid.scope/$option";
    PVE::ProcFSTools::write_proc_entry($path, $value);
}

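# Usage sketch (the controller, file name and value are illustrative cgroup v1
# entries, e.g. for a 'cpulimit' change on a running VM):
#
#   cgroups_write('cpu', $vmid, 'cpu.cfs_quota_us', 50000);
#
# which writes to /sys/fs/cgroup/cpu/qemu.slice/<vmid>.scope/cpu.cfs_quota_us.
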
my $nodename_cache;
sub nodename {
    $nodename_cache //= PVE::INotify::nodename();
    return $nodename_cache;
}

my $watchdog_fmt = {
    model => {
        default_key => 1,
        type => 'string',
        enum => [qw(i6300esb ib700)],
        description => "Watchdog type to emulate.",
        default => 'i6300esb',
        optional => 1,
    },
    action => {
        type => 'string',
        enum => [qw(reset shutdown poweroff pause debug none)],
        description => "The action to perform if after activation the guest fails to poll the watchdog in time.",
        optional => 1,
    },
};
PVE::JSONSchema::register_format('pve-qm-watchdog', $watchdog_fmt);

my $agent_fmt = {
    enabled => {
        description => "Enable/disable Qemu GuestAgent.",
        type => 'boolean',
        default => 0,
        default_key => 1,
    },
    fstrim_cloned_disks => {
        description => "Run fstrim after cloning/moving a disk.",
        type => 'boolean',
        optional => 1,
        default => 0,
    },
    type => {
        description => "Select the agent type",
        type => 'string',
        default => 'virtio',
        optional => 1,
        enum => [qw(virtio isa)],
    },
};

my $vga_fmt = {
    type => {
        description => "Select the VGA type.",
        type => 'string',
        default => 'std',
        optional => 1,
        default_key => 1,
        enum => [qw(cirrus qxl qxl2 qxl3 qxl4 none serial0 serial1 serial2 serial3 std virtio vmware)],
    },
    memory => {
        description => "Sets the VGA memory (in MiB). Has no effect with serial display.",
        type => 'integer',
        optional => 1,
        minimum => 4,
        maximum => 512,
    },
};

my $ivshmem_fmt = {
    size => {
        type => 'integer',
        minimum => 1,
        description => "The size of the file in MB.",
    },
    name => {
        type => 'string',
        pattern => '[a-zA-Z0-9\-]+',
        optional => 1,
        format_description => 'string',
        description => "The name of the file. Will be prefixed with 'pve-shm-'. Default is the VMID. Will be deleted when the VM is stopped.",
    },
};

my $audio_fmt = {
    device => {
        type => 'string',
        enum => [qw(ich9-intel-hda intel-hda AC97)],
        description => "Configure an audio device."
    },
    driver => {
        type => 'string',
        enum => ['spice'],
        default => 'spice',
        optional => 1,
        description => "Driver backend for the audio device."
    },
};

my $spice_enhancements_fmt = {
    foldersharing => {
        type => 'boolean',
        optional => 1,
        default => '0',
        description => "Enable folder sharing via SPICE. Needs Spice-WebDAV daemon installed in the VM."
    },
    videostreaming => {
        type => 'string',
        enum => ['off', 'all', 'filter'],
        default => 'off',
        optional => 1,
        description => "Enable video streaming. Uses compression for detected video streams."
    },
};

my $rng_fmt = {
    source => {
        type => 'string',
        enum => ['/dev/urandom', '/dev/random', '/dev/hwrng'],
        default_key => 1,
        description => "The file on the host to gather entropy from. In most"
            . " cases /dev/urandom should be preferred over /dev/random"
            . " to avoid entropy-starvation issues on the host. Using"
            . " urandom does *not* decrease security in any meaningful"
            . " way, as it's still seeded from real entropy, and the"
            . " bytes provided will most likely be mixed with real"
            . " entropy on the guest as well. /dev/hwrng can be used"
            . " to pass through a hardware RNG from the host.",
    },
    max_bytes => {
        type => 'integer',
        description => "Maximum bytes of entropy injected into the guest every"
            . " 'period' milliseconds. Prefer a lower value when using"
            . " /dev/random as source. Use 0 to disable limiting"
            . " (potentially dangerous!).",
        optional => 1,
        # default is 1 KiB/s, provides enough entropy to the guest to avoid
        # boot-starvation issues (e.g. systemd etc...) while allowing no chance
        # of overwhelming the host, provided we're reading from /dev/urandom
        default => 1024,
    },
    period => {
        type => 'integer',
        description => "Every 'period' milliseconds the entropy-injection quota"
            . " is reset, allowing the guest to retrieve another"
            . " 'max_bytes' of entropy.",
        optional => 1,
        default => 1000,
    },
};

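# Example config line using this format (the values shown are the defaults):
#
#   rng0: source=/dev/urandom,max_bytes=1024,period=1000
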
my $confdesc = {
    onboot => {
        optional => 1,
        type => 'boolean',
        description => "Specifies whether a VM will be started during system bootup.",
        default => 0,
    },
    autostart => {
        optional => 1,
        type => 'boolean',
        description => "Automatic restart after crash (currently ignored).",
        default => 0,
    },
    hotplug => {
        optional => 1,
        type => 'string', format => 'pve-hotplug-features',
        description => "Selectively enable hotplug features. This is a comma separated list of hotplug features: 'network', 'disk', 'cpu', 'memory' and 'usb'. Use '0' to disable hotplug completely. Value '1' is an alias for the default 'network,disk,usb'.",
        default => 'network,disk,usb',
    },
    reboot => {
        optional => 1,
        type => 'boolean',
        description => "Allow reboot. If set to '0' the VM exits on reboot.",
        default => 1,
    },
    lock => {
        optional => 1,
        type => 'string',
        description => "Lock/unlock the VM.",
        enum => [qw(backup clone create migrate rollback snapshot snapshot-delete suspending suspended)],
    },
    cpulimit => {
        optional => 1,
        type => 'number',
        description => "Limit of CPU usage.",
        verbose_description => "Limit of CPU usage.\n\nNOTE: If the computer has 2 CPUs, it has total of '2' CPU time. Value '0' indicates no CPU limit.",
        minimum => 0,
        maximum => 128,
        default => 0,
    },
    cpuunits => {
        optional => 1,
        type => 'integer',
        description => "CPU weight for a VM.",
        verbose_description => "CPU weight for a VM. Argument is used in the kernel fair scheduler. The larger the number is, the more CPU time this VM gets. Number is relative to weights of all the other running VMs.",
        minimum => 2,
        maximum => 262144,
        default => 1024,
    },
    memory => {
        optional => 1,
        type => 'integer',
        description => "Amount of RAM for the VM in MB. This is the maximum available memory when you use the balloon device.",
        minimum => 16,
        default => 512,
    },
    balloon => {
        optional => 1,
        type => 'integer',
        description => "Amount of target RAM for the VM in MB. Using zero disables the balloon driver.",
        minimum => 0,
    },
    shares => {
        optional => 1,
        type => 'integer',
        description => "Amount of memory shares for auto-ballooning. The larger the number is, the more memory this VM gets. Number is relative to weights of all other running VMs. Using zero disables auto-ballooning. Auto-ballooning is done by pvestatd.",
        minimum => 0,
        maximum => 50000,
        default => 1000,
    },
    keyboard => {
        optional => 1,
        type => 'string',
        description => "Keyboard layout for VNC server. Default is read from the '/etc/pve/datacenter.cfg' configuration file. " .
            "It should not be necessary to set it.",
        enum => PVE::Tools::kvmkeymaplist(),
        default => undef,
    },
    name => {
        optional => 1,
        type => 'string', format => 'dns-name',
        description => "Set a name for the VM. Only used on the configuration web interface.",
    },
    scsihw => {
        optional => 1,
        type => 'string',
        description => "SCSI controller model",
        enum => [qw(lsi lsi53c810 virtio-scsi-pci virtio-scsi-single megasas pvscsi)],
        default => 'lsi',
    },
    description => {
        optional => 1,
        type => 'string',
        description => "Description for the VM. Only used on the configuration web interface. This is saved as comment inside the configuration file.",
    },
    ostype => {
        optional => 1,
        type => 'string',
        enum => [qw(other wxp w2k w2k3 w2k8 wvista win7 win8 win10 l24 l26 solaris)],
        description => "Specify guest operating system.",
        verbose_description => <<EODESC,
Specify guest operating system. This is used to enable special
optimization/features for specific operating systems:

[horizontal]
other;; unspecified OS
wxp;; Microsoft Windows XP
w2k;; Microsoft Windows 2000
w2k3;; Microsoft Windows 2003
w2k8;; Microsoft Windows 2008
wvista;; Microsoft Windows Vista
win7;; Microsoft Windows 7
win8;; Microsoft Windows 8/2012/2012r2
win10;; Microsoft Windows 10/2016
l24;; Linux 2.4 Kernel
l26;; Linux 2.6 - 5.X Kernel
solaris;; Solaris/OpenSolaris/OpenIndiana kernel
EODESC
    },
    boot => {
        optional => 1,
        type => 'string',
        description => "Boot on floppy (a), hard disk (c), CD-ROM (d), or network (n).",
        pattern => '[acdn]{1,4}',
        default => 'cdn',
    },
    bootdisk => {
        optional => 1,
        type => 'string', format => 'pve-qm-bootdisk',
        description => "Enable booting from specified disk.",
        pattern => '(ide|sata|scsi|virtio)\d+',
    },
    smp => {
        optional => 1,
        type => 'integer',
        description => "The number of CPUs. Please use option -sockets instead.",
        minimum => 1,
        default => 1,
    },
    sockets => {
        optional => 1,
        type => 'integer',
        description => "The number of CPU sockets.",
        minimum => 1,
        default => 1,
    },
    cores => {
        optional => 1,
        type => 'integer',
        description => "The number of cores per socket.",
        minimum => 1,
        default => 1,
    },
    numa => {
        optional => 1,
        type => 'boolean',
        description => "Enable/disable NUMA.",
        default => 0,
    },
    hugepages => {
        optional => 1,
        type => 'string',
        description => "Enable/disable hugepages memory.",
        enum => [qw(any 2 1024)],
    },
    vcpus => {
        optional => 1,
        type => 'integer',
        description => "Number of hotplugged vcpus.",
        minimum => 1,
        default => 0,
    },
    acpi => {
        optional => 1,
        type => 'boolean',
        description => "Enable/disable ACPI.",
        default => 1,
    },
    agent => {
        optional => 1,
        description => "Enable/disable Qemu GuestAgent and its properties.",
        type => 'string',
        format => $agent_fmt,
    },
    kvm => {
        optional => 1,
        type => 'boolean',
        description => "Enable/disable KVM hardware virtualization.",
        default => 1,
    },
    tdf => {
        optional => 1,
        type => 'boolean',
        description => "Enable/disable time drift fix.",
        default => 0,
    },
    localtime => {
        optional => 1,
        type => 'boolean',
        description => "Set the real time clock to local time. This is enabled by default if ostype indicates a Microsoft OS.",
    },
    freeze => {
        optional => 1,
        type => 'boolean',
        description => "Freeze CPU at startup (use 'c' monitor command to start execution).",
    },
    vga => {
        optional => 1,
        type => 'string', format => $vga_fmt,
        description => "Configure the VGA hardware.",
        verbose_description => "Configure the VGA Hardware. If you want to use " .
            "high resolution modes (>= 1280x1024x16) you may need to increase " .
            "the vga memory option. Since QEMU 2.9 the default VGA display type " .
            "is 'std' for all OS types besides some Windows versions (XP and " .
            "older) which use 'cirrus'. The 'qxl' option enables the SPICE " .
            "display server. For win* OS you can select how many independent " .
            "displays you want, Linux guests can add displays themselves.\n" .
            "You can also run without any graphic card, using a serial device as terminal.",
    },
    watchdog => {
        optional => 1,
        type => 'string', format => 'pve-qm-watchdog',
        description => "Create a virtual hardware watchdog device.",
        verbose_description => "Create a virtual hardware watchdog device. Once enabled" .
            " (by a guest action), the watchdog must be periodically polled " .
            "by an agent inside the guest or else the watchdog will reset " .
            "the guest (or execute the respective action specified)",
    },
    startdate => {
        optional => 1,
        type => 'string',
        typetext => "(now | YYYY-MM-DD | YYYY-MM-DDTHH:MM:SS)",
        description => "Set the initial date of the real time clock. Valid format for date are: 'now' or '2006-06-17T16:01:21' or '2006-06-17'.",
        pattern => '(now|\d{4}-\d{1,2}-\d{1,2}(T\d{1,2}:\d{1,2}:\d{1,2})?)',
        default => 'now',
    },
    startup => get_standard_option('pve-startup-order'),
    template => {
        optional => 1,
        type => 'boolean',
        description => "Enable/disable Template.",
        default => 0,
    },
    args => {
        optional => 1,
        type => 'string',
        description => "Arbitrary arguments passed to kvm.",
        verbose_description => <<EODESCR,
Arbitrary arguments passed to kvm, for example:

args: -no-reboot -no-hpet

NOTE: this option is for experts only.
EODESCR
    },
    tablet => {
        optional => 1,
        type => 'boolean',
        default => 1,
        description => "Enable/disable the USB tablet device.",
        verbose_description => "Enable/disable the USB tablet device. This device is " .
            "usually needed to allow absolute mouse positioning with VNC. " .
            "Else the mouse runs out of sync with normal VNC clients. " .
            "If you're running lots of console-only guests on one host, " .
            "you may consider disabling this to save some context switches. " .
            "This is turned off by default if you use spice (-vga=qxl).",
    },
    migrate_speed => {
        optional => 1,
        type => 'integer',
        description => "Set maximum speed (in MB/s) for migrations. Value 0 is no limit.",
        minimum => 0,
        default => 0,
    },
    migrate_downtime => {
        optional => 1,
        type => 'number',
        description => "Set maximum tolerated downtime (in seconds) for migrations.",
        minimum => 0,
        default => 0.1,
    },
    cdrom => {
        optional => 1,
        type => 'string', format => 'pve-qm-ide',
        typetext => '<volume>',
        description => "This is an alias for option -ide2",
    },
    cpu => {
        optional => 1,
        description => "Emulated CPU type.",
        type => 'string',
        format => $PVE::QemuServer::CPUConfig::cpu_fmt,
    },
    parent => get_standard_option('pve-snapshot-name', {
        optional => 1,
        description => "Parent snapshot name. This is used internally, and should not be modified.",
    }),
    snaptime => {
        optional => 1,
        description => "Timestamp for snapshots.",
        type => 'integer',
        minimum => 0,
    },
    vmstate => {
        optional => 1,
        type => 'string', format => 'pve-volume-id',
        description => "Reference to a volume which stores the VM state. This is used internally for snapshots.",
    },
    vmstatestorage => get_standard_option('pve-storage-id', {
        description => "Default storage for VM state volumes/files.",
        optional => 1,
    }),
    runningmachine => get_standard_option('pve-qemu-machine', {
        description => "Specifies the Qemu machine type of the running vm. This is used internally for snapshots.",
    }),
    machine => get_standard_option('pve-qemu-machine'),
    arch => {
        description => "Virtual processor architecture. Defaults to the host.",
        optional => 1,
        type => 'string',
        enum => [qw(x86_64 aarch64)],
    },
    smbios1 => {
        description => "Specify SMBIOS type 1 fields.",
        type => 'string', format => 'pve-qm-smbios1',
        maxLength => 512,
        optional => 1,
    },
    protection => {
        optional => 1,
        type => 'boolean',
        description => "Sets the protection flag of the VM. This will disable the remove VM and remove disk operations.",
        default => 0,
    },
    bios => {
        optional => 1,
        type => 'string',
        enum => [qw(seabios ovmf)],
        description => "Select BIOS implementation.",
        default => 'seabios',
    },
    vmgenid => {
        type => 'string',
        pattern => '(?:[a-fA-F0-9]{8}(?:-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}|[01])',
        format_description => 'UUID',
        description => "Set VM Generation ID. Use '1' to autogenerate on create or update, pass '0' to disable explicitly.",
        verbose_description => "The VM generation ID (vmgenid) device exposes a" .
            " 128-bit integer value identifier to the guest OS. This allows" .
            " notifying the guest operating system when the virtual machine is" .
            " executed with a different configuration (e.g. snapshot execution" .
            " or creation from a template). The guest operating system notices" .
            " the change, and is then able to react as appropriate by marking" .
            " its copies of distributed databases as dirty, re-initializing its" .
            " random number generator, etc.\n" .
            "Note that auto-creation only works when done through API/CLI create" .
            " or update methods, but not when manually editing the config file.",
        default => "1 (autogenerated)",
        optional => 1,
    },
    hookscript => {
        type => 'string',
        format => 'pve-volume-id',
        optional => 1,
        description => "Script that will be executed during various steps in the VM's lifetime.",
    },
    ivshmem => {
        type => 'string',
        format => $ivshmem_fmt,
        description => "Inter-VM shared memory. Useful for direct communication between VMs, or to the host.",
        optional => 1,
    },
    audio0 => {
        type => 'string',
        format => $audio_fmt,
        description => "Configure an audio device, useful in combination with QXL/Spice.",
        optional => 1,
    },
    spice_enhancements => {
        type => 'string',
        format => $spice_enhancements_fmt,
        description => "Configure additional enhancements for SPICE.",
        optional => 1,
    },
    tags => {
        type => 'string', format => 'pve-tag-list',
        description => 'Tags of the VM. This is only meta information.',
        optional => 1,
    },
    rng0 => {
        type => 'string',
        format => $rng_fmt,
        description => "Configure a VirtIO-based Random Number Generator.",
        optional => 1,
    },
};

my $cicustom_fmt = {
    meta => {
        type => 'string',
        optional => 1,
        description => 'Specify a custom file containing all meta data passed to the VM via cloud-init. This is provider specific, meaning configdrive2 and nocloud differ.',
        format => 'pve-volume-id',
        format_description => 'volume',
    },
    network => {
        type => 'string',
        optional => 1,
        description => 'Specify a custom file containing all network data passed to the VM via cloud-init.',
        format => 'pve-volume-id',
        format_description => 'volume',
    },
    user => {
        type => 'string',
        optional => 1,
        description => 'Specify a custom file containing all user data passed to the VM via cloud-init.',
        format => 'pve-volume-id',
        format_description => 'volume',
    },
};
PVE::JSONSchema::register_format('pve-qm-cicustom', $cicustom_fmt);

my $confdesc_cloudinit = {
    citype => {
        optional => 1,
        type => 'string',
        description => 'Specifies the cloud-init configuration format. The default depends on the configured operating system type (`ostype`). We use the `nocloud` format for Linux, and `configdrive2` for windows.',
        enum => ['configdrive2', 'nocloud'],
    },
    ciuser => {
        optional => 1,
        type => 'string',
        description => "cloud-init: User name to change ssh keys and password for instead of the image's configured default user.",
    },
    cipassword => {
        optional => 1,
        type => 'string',
        description => 'cloud-init: Password to assign the user. Using this is generally not recommended. Use ssh keys instead. Also note that older cloud-init versions do not support hashed passwords.',
    },
    cicustom => {
        optional => 1,
        type => 'string',
        description => 'cloud-init: Specify custom files to replace the automatically generated ones at start.',
        format => 'pve-qm-cicustom',
    },
    searchdomain => {
        optional => 1,
        type => 'string',
        description => "cloud-init: Sets DNS search domains for the guest. Create will automatically use the setting from the host if neither searchdomain nor nameserver are set.",
    },
    nameserver => {
        optional => 1,
        type => 'string', format => 'address-list',
        description => "cloud-init: Sets DNS server IP address for the guest. Create will automatically use the setting from the host if neither searchdomain nor nameserver are set.",
    },
    sshkeys => {
        optional => 1,
        type => 'string',
        format => 'urlencoded',
        description => "cloud-init: Setup public SSH keys (one key per line, OpenSSH format).",
    },
};

# what about other qemu settings ?
#cpu => 'string',
#machine => 'string',
#fda => 'file',
#fdb => 'file',
#mtdblock => 'file',
#sd => 'file',
#pflash => 'file',
#snapshot => 'bool',
#bootp => 'file',
##tftp => 'dir',
##smb => 'dir',
#kernel => 'file',
#append => 'string',
#initrd => 'file',
##soundhw => 'string',

while (my ($k, $v) = each %$confdesc) {
    PVE::JSONSchema::register_standard_option("pve-qm-$k", $v);
}

my $MAX_USB_DEVICES = 5;
my $MAX_NETS = 32;
my $MAX_HOSTPCI_DEVICES = 16;
my $MAX_SERIAL_PORTS = 4;
my $MAX_PARALLEL_PORTS = 3;
my $MAX_NUMA = 8;

my $numa_fmt = {
    cpus => {
        type => "string",
        pattern => qr/\d+(?:-\d+)?(?:;\d+(?:-\d+)?)*/,
        description => "CPUs accessing this NUMA node.",
        format_description => "id[-id];...",
    },
    memory => {
        type => "number",
        description => "Amount of memory this NUMA node provides.",
        optional => 1,
    },
    hostnodes => {
        type => "string",
        pattern => qr/\d+(?:-\d+)?(?:;\d+(?:-\d+)?)*/,
        description => "Host NUMA nodes to use.",
        format_description => "id[-id];...",
        optional => 1,
    },
    policy => {
        type => 'string',
        enum => [qw(preferred bind interleave)],
        description => "NUMA allocation policy.",
        optional => 1,
    },
};
PVE::JSONSchema::register_format('pve-qm-numanode', $numa_fmt);

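# Example config lines using this format:
#
#   numa0: cpus=0-1,memory=1024,hostnodes=0-1,policy=interleave
#   numa1: cpus=2-3,memory=3072,hostnodes=2,policy=bind
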
my $numadesc = {
    optional => 1,
    type => 'string', format => $numa_fmt,
    description => "NUMA topology.",
};
PVE::JSONSchema::register_standard_option("pve-qm-numanode", $numadesc);

for (my $i = 0; $i < $MAX_NUMA; $i++)  {
    $confdesc->{"numa$i"} = $numadesc;
}

my $nic_model_list = ['rtl8139', 'ne2k_pci', 'e1000', 'pcnet', 'virtio',
                      'ne2k_isa', 'i82551', 'i82557b', 'i82559er', 'vmxnet3',
                      'e1000-82540em', 'e1000-82544gc', 'e1000-82545em'];
my $nic_model_list_txt = join(' ', sort @$nic_model_list);

my $net_fmt_bridge_descr = <<__EOD__;
Bridge to attach the network device to. The Proxmox VE standard bridge
is called 'vmbr0'.

If you do not specify a bridge, we create a kvm user (NATed) network
device, which provides DHCP and DNS services. The following addresses
are used:

 10.0.2.2   Gateway
 10.0.2.3   DNS Server
 10.0.2.4   SMB Server

The DHCP server assigns addresses to the guest starting from 10.0.2.15.
__EOD__

my $net_fmt = {
    macaddr => get_standard_option('mac-addr', {
        description => "MAC address. That address must be unique within your network. This is automatically generated if not specified.",
    }),
    model => {
        type => 'string',
        description => "Network Card Model. The 'virtio' model provides the best performance with very low CPU overhead. If your guest does not support this driver, it is usually best to use 'e1000'.",
        enum => $nic_model_list,
        default_key => 1,
    },
    (map { $_ => { keyAlias => 'model', alias => 'macaddr' }} @$nic_model_list),
    bridge => {
        type => 'string',
        description => $net_fmt_bridge_descr,
        format_description => 'bridge',
        optional => 1,
    },
    queues => {
        type => 'integer',
        minimum => 0, maximum => 16,
        description => 'Number of packet queues to be used on the device.',
        optional => 1,
    },
    rate => {
        type => 'number',
        minimum => 0,
        description => "Rate limit in mbps (megabytes per second) as floating point number.",
        optional => 1,
    },
    tag => {
        type => 'integer',
        minimum => 1, maximum => 4094,
        description => 'VLAN tag to apply to packets on this interface.',
        optional => 1,
    },
    trunks => {
        type => 'string',
        pattern => qr/\d+(?:-\d+)?(?:;\d+(?:-\d+)?)*/,
        description => 'VLAN trunks to pass through this interface.',
        format_description => 'vlanid[;vlanid...]',
        optional => 1,
    },
    firewall => {
        type => 'boolean',
        description => 'Whether this interface should be protected by the firewall.',
        optional => 1,
    },
    link_down => {
        type => 'boolean',
        description => 'Whether this interface should be disconnected (like pulling the plug).',
        optional => 1,
    },
};

my $netdesc = {
    optional => 1,
    type => 'string', format => $net_fmt,
    description => "Specify network devices.",
};

PVE::JSONSchema::register_standard_option("pve-qm-net", $netdesc);

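# Example config line using this format (the MAC address is illustrative; it
# is auto-generated when omitted):
#
#   net0: virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0,firewall=1,tag=100
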
my $ipconfig_fmt = {
    ip => {
        type => 'string',
        format => 'pve-ipv4-config',
        format_description => 'IPv4Format/CIDR',
        description => 'IPv4 address in CIDR format.',
        optional => 1,
        default => 'dhcp',
    },
    gw => {
        type => 'string',
        format => 'ipv4',
        format_description => 'GatewayIPv4',
        description => 'Default gateway for IPv4 traffic.',
        optional => 1,
        requires => 'ip',
    },
    ip6 => {
        type => 'string',
        format => 'pve-ipv6-config',
        format_description => 'IPv6Format/CIDR',
        description => 'IPv6 address in CIDR format.',
        optional => 1,
        default => 'dhcp',
    },
    gw6 => {
        type => 'string',
        format => 'ipv6',
        format_description => 'GatewayIPv6',
        description => 'Default gateway for IPv6 traffic.',
        optional => 1,
        requires => 'ip6',
    },
};
PVE::JSONSchema::register_format('pve-qm-ipconfig', $ipconfig_fmt);
my $ipconfigdesc = {
    optional => 1,
    type => 'string', format => 'pve-qm-ipconfig',
    description => <<'EODESCR',
cloud-init: Specify IP addresses and gateways for the corresponding interface.

IP addresses use CIDR notation, gateways are optional but need an IP of the same type specified.

The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided.
For IPv6 the special string 'auto' can be used to use stateless autoconfiguration.

If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4.
EODESCR
};
PVE::JSONSchema::register_standard_option("pve-qm-ipconfig", $ipconfigdesc);

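# Example config line using this format (addresses are illustrative):
#
#   ipconfig0: ip=192.168.1.10/24,gw=192.168.1.1,ip6=dhcp
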
for (my $i = 0; $i < $MAX_NETS; $i++) {
    $confdesc->{"net$i"} = $netdesc;
    $confdesc_cloudinit->{"ipconfig$i"} = $ipconfigdesc;
}

foreach my $key (keys %$confdesc_cloudinit) {
    $confdesc->{$key} = $confdesc_cloudinit->{$key};
}

PVE::JSONSchema::register_format('pve-volume-id-or-qm-path', \&verify_volume_id_or_qm_path);
sub verify_volume_id_or_qm_path {
    my ($volid, $noerr) = @_;

    if ($volid eq 'none' || $volid eq 'cdrom' || $volid =~ m|^/|) {
        return $volid;
    }

    # if it's neither 'none' nor 'cdrom' nor a path, check if it's a volume-id
    $volid = eval { PVE::JSONSchema::check_format('pve-volume-id', $volid, '') };
    if ($@) {
        return undef if $noerr;
        die $@;
    }
    return $volid;
}

my $usb_fmt = {
    host => {
        default_key => 1,
        type => 'string', format => 'pve-qm-usb-device',
        format_description => 'HOSTUSBDEVICE|spice',
        description => <<EODESCR,
The Host USB device or port or the value 'spice'. HOSTUSBDEVICE syntax is:

 'bus-port(.port)*' (decimal numbers) or
 'vendor_id:product_id' (hexadecimal numbers) or
 'spice'

You can use the 'lsusb -t' command to list existing usb devices.

NOTE: This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.

The value 'spice' can be used to add a usb redirection device for spice.
EODESCR
    },
    usb3 => {
        optional => 1,
        type => 'boolean',
        description => "Specifies whether the given host option is a USB3 device or port.",
        default => 0,
    },
};

my $usbdesc = {
    optional => 1,
    type => 'string', format => $usb_fmt,
    description => "Configure an USB device (n is 0 to 4).",
};
PVE::JSONSchema::register_standard_option("pve-qm-usb", $usbdesc);

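# Example config lines using this format (the vendor:product ID is
# illustrative):
#
#   usb0: host=046d:c52b,usb3=1
#   usb1: spice
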
my $PCIRE = qr/([a-f0-9]{4}:)?[a-f0-9]{2}:[a-f0-9]{2}(?:\.[a-f0-9])?/;
my $hostpci_fmt = {
    host => {
        default_key => 1,
        type => 'string',
        pattern => qr/$PCIRE(;$PCIRE)*/,
        format_description => 'HOSTPCIID[;HOSTPCIID2...]',
        description => <<EODESCR,
Host PCI device pass through. The PCI ID of a host's PCI device or a list
of PCI virtual functions of the host. HOSTPCIID syntax is:

'bus:dev.func' (hexadecimal numbers)

You can use the 'lspci' command to list existing PCI devices.
EODESCR
    },
    rombar => {
        type => 'boolean',
        description => "Specify whether or not the device's ROM will be visible in the guest's memory map.",
        optional => 1,
        default => 1,
    },
    romfile => {
        type => 'string',
        pattern => '[^,;]+',
        format_description => 'string',
        description => "Custom pci device rom filename (must be located in /usr/share/kvm/).",
        optional => 1,
    },
    pcie => {
        type => 'boolean',
        description => "Choose the PCI-express bus (needs the 'q35' machine model).",
        optional => 1,
        default => 0,
    },
    'x-vga' => {
        type => 'boolean',
        description => "Enable vfio-vga device support.",
        optional => 1,
        default => 0,
    },
    'mdev' => {
        type => 'string',
        format_description => 'string',
        pattern => '[^/\.:]+',
        optional => 1,
        description => <<EODESCR
The type of mediated device to use.
An instance of this type will be created on startup of the VM and
will be cleaned up when the VM stops.
EODESCR
    }
};
PVE::JSONSchema::register_format('pve-qm-hostpci', $hostpci_fmt);

my $hostpcidesc = {
    optional => 1,
    type => 'string', format => 'pve-qm-hostpci',
    description => "Map host PCI devices into guest.",
    verbose_description => <<EODESCR,
Map host PCI devices into guest.

NOTE: This option allows direct access to host hardware. So it is no longer
possible to migrate such machines - use with special care.

CAUTION: Experimental! User reported problems with this option.
EODESCR
};
PVE::JSONSchema::register_standard_option("pve-qm-hostpci", $hostpcidesc);

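# Example config line using this format (the PCI address is illustrative):
#
#   hostpci0: 01:00.0,pcie=1,x-vga=1
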
my $serialdesc = {
    optional => 1,
    type => 'string',
    pattern => '(/dev/.+|socket)',
    description =>  "Create a serial device inside the VM (n is 0 to 3)",
    verbose_description =>  <<EODESCR,
Create a serial device inside the VM (n is 0 to 3), and pass through a
host serial device (i.e. /dev/ttyS0), or create a unix socket on the
host side (use 'qm terminal' to open a terminal connection).

NOTE: If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.

CAUTION: Experimental! User reported problems with this option.
EODESCR
};

my $paralleldesc = {
    optional => 1,
    type => 'string',
    pattern => '/dev/parport\d+|/dev/usb/lp\d+',
    description =>  "Map host parallel devices (n is 0 to 2).",
    verbose_description =>  <<EODESCR,
Map host parallel devices (n is 0 to 2).

NOTE: This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.

CAUTION: Experimental! User reported problems with this option.
EODESCR
};

for (my $i = 0; $i < $MAX_PARALLEL_PORTS; $i++)  {
    $confdesc->{"parallel$i"} = $paralleldesc;
}

for (my $i = 0; $i < $MAX_SERIAL_PORTS; $i++)  {
    $confdesc->{"serial$i"} = $serialdesc;
}

for (my $i = 0; $i < $MAX_HOSTPCI_DEVICES; $i++)  {
    $confdesc->{"hostpci$i"} = $hostpcidesc;
}

for my $key (keys %{$PVE::QemuServer::Drive::drivedesc_hash}) {
    $confdesc->{$key} = $PVE::QemuServer::Drive::drivedesc_hash->{$key};
}

for (my $i = 0; $i < $PVE::QemuServer::Drive::MAX_UNUSED_DISKS; $i++) {
    $confdesc->{"unused$i"} = $PVE::QemuServer::Drive::unuseddesc;
}

for (my $i = 0; $i < $MAX_USB_DEVICES; $i++)  {
    $confdesc->{"usb$i"} = $usbdesc;
}

my $kvm_api_version = 0;

sub kvm_version {
    return $kvm_api_version if $kvm_api_version;

    open my $fh, '<', '/dev/kvm'
        or return undef;

    # 0xae00 => KVM_GET_API_VERSION
    $kvm_api_version = ioctl($fh, 0xae00, 0);

    return $kvm_api_version;
}

my $kvm_user_version = {};
my $kvm_mtime = {};

sub kvm_user_version {
    my ($binary) = @_;

    $binary //= get_command_for_arch(get_host_arch()); # get the native arch by default
    my $st = stat($binary);

    my $cachedmtime = $kvm_mtime->{$binary} // -1;
    return $kvm_user_version->{$binary} if $kvm_user_version->{$binary} &&
        $cachedmtime == $st->mtime;

    $kvm_user_version->{$binary} = 'unknown';
    $kvm_mtime->{$binary} = $st->mtime;

    my $code = sub {
        my $line = shift;
        if ($line =~ m/^QEMU( PC)? emulator version (\d+\.\d+(\.\d+)?)(\.\d+)?[,\s]/) {
            $kvm_user_version->{$binary} = $2;
        }
    };

    eval { run_command([$binary, '--version'], outfunc => $code); };
    warn $@ if $@;

    return $kvm_user_version->{$binary};
}

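# Usage sketch: the two version helpers are typically checked together before
# starting a guest, e.g.:
#
#   die "KVM virtualisation not available\n" if !kvm_version();
#   my $qemu_version = kvm_user_version(); # e.g. '4.1.1' for the native binary
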
sub kernel_has_vhost_net {
    return -c '/dev/vhost-net';
}

sub option_exists {
    my $key = shift;
    return defined($confdesc->{$key});
}

my $cdrom_path;
sub get_cdrom_path {
    return $cdrom_path if $cdrom_path;

    return $cdrom_path = "/dev/cdrom" if -l "/dev/cdrom";
    return $cdrom_path = "/dev/cdrom1" if -l "/dev/cdrom1";
    return $cdrom_path = "/dev/cdrom2" if -l "/dev/cdrom2";
}

sub get_iso_path {
    my ($storecfg, $vmid, $cdrom) = @_;

    if ($cdrom eq 'cdrom') {
        return get_cdrom_path();
    } elsif ($cdrom eq 'none') {
        return '';
    } elsif ($cdrom =~ m|^/|) {
        return $cdrom;
    } else {
        return PVE::Storage::path($storecfg, $cdrom);
    }
}

# try to convert old style file names to volume IDs
sub filename_to_volume_id {
    my ($vmid, $file, $media) = @_;

    if (!($file eq 'none' || $file eq 'cdrom' ||
          $file =~ m|^/dev/.+| || $file =~ m/^([^:]+):(.+)$/)) {

        return undef if $file =~ m|/|;

        if ($media && $media eq 'cdrom') {
            $file = "local:iso/$file";
        } else {
            $file = "local:$vmid/$file";
        }
    }

    return $file;
}

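# Example (illustrative): filename_to_volume_id(100, 'vm-100-disk-0.qcow2', 'disk')
# returns 'local:100/vm-100-disk-0.qcow2'; values that are already volume IDs,
# absolute paths, 'none' or 'cdrom' are returned unchanged.
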
sub verify_media_type {
    my ($opt, $vtype, $media) = @_;

    return if !$media;

    my $etype;
    if ($media eq 'disk') {
        $etype = 'images';
    } elsif ($media eq 'cdrom') {
        $etype = 'iso';
    } else {
        die "internal error";
    }

    return if ($vtype eq $etype);

    raise_param_exc({ $opt => "unexpected media type ($vtype != $etype)" });
}

sub cleanup_drive_path {
    my ($opt, $storecfg, $drive) = @_;

    # try to convert filesystem paths to volume IDs

    if (($drive->{file} !~ m/^(cdrom|none)$/) &&
        ($drive->{file} !~ m|^/dev/.+|) &&
        ($drive->{file} !~ m/^([^:]+):(.+)$/) &&
        ($drive->{file} !~ m/^\d+$/)) {
        my ($vtype, $volid) = PVE::Storage::path_to_volume_id($storecfg, $drive->{file});
        raise_param_exc({ $opt => "unable to associate path '$drive->{file}' to any storage"}) if !$vtype;
        $drive->{media} = 'cdrom' if !$drive->{media} && $vtype eq 'iso';
        verify_media_type($opt, $vtype, $drive->{media});
        $drive->{file} = $volid;
    }

    $drive->{media} = 'cdrom' if !$drive->{media} && $drive->{file} =~ m/^(cdrom|none)$/;
}

sub parse_hotplug_features {
    my ($data) = @_;

    my $res = {};

    return $res if $data eq '0';

    $data = $confdesc->{hotplug}->{default} if $data eq '1';

    foreach my $feature (PVE::Tools::split_list($data)) {
        if ($feature =~ m/^(network|disk|cpu|memory|usb)$/) {
            $res->{$1} = 1;
        } else {
            die "invalid hotplug feature '$feature'\n";
        }
    }
    return $res;
}

PVE::JSONSchema::register_format('pve-hotplug-features', \&pve_verify_hotplug_features);
sub pve_verify_hotplug_features {
    my ($value, $noerr) = @_;

    return $value if parse_hotplug_features($value);

    return undef if $noerr;

    die "unable to parse hotplug option\n";
}

2012-03-19 13:32:52 +04:00
sub scsi_inquiry {
my ( $ fh , $ noerr ) = @ _ ;
my $ SG_IO = 0x2285 ;
my $ SG_GET_VERSION_NUM = 0x2282 ;
my $ versionbuf = "\x00" x 8 ;
my $ ret = ioctl ( $ fh , $ SG_GET_VERSION_NUM , $ versionbuf ) ;
if ( ! $ ret ) {
die "scsi ioctl SG_GET_VERSION_NUM failoed - $!\n" if ! $ noerr ;
return undef ;
}
2012-05-30 14:08:33 +04:00
my $ version = unpack ( "I" , $ versionbuf ) ;
2012-03-19 13:32:52 +04:00
if ( $ version < 30000 ) {
die "scsi generic interface too old\n" if ! $ noerr ;
return undef ;
}
2012-05-30 14:08:33 +04:00
2012-03-19 13:32:52 +04:00
my $ buf = "\x00" x 36 ;
my $ sensebuf = "\x00" x 8 ;
2013-07-15 15:12:18 +04:00
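# 6-byte INQUIRY CDB: opcode 0x12, allocation length 36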
my $ cmd = pack ( "C x3 C x1" , 0x12 , 36 ) ;
2012-05-30 14:08:33 +04:00
2012-03-19 13:32:52 +04:00
# see /usr/include/scsi/sg.h
my $ sg_io_hdr_t = "i i C C s I P P P I I i P C C C C S S i I I" ;
2012-05-30 14:08:33 +04:00
my $ packet = pack ( $ sg_io_hdr_t , ord ( 'S' ) , - 3 , length ( $ cmd ) ,
length ( $ sensebuf ) , 0 , length ( $ buf ) , $ buf ,
2012-03-19 13:32:52 +04:00
$ cmd , $ sensebuf , 6000 ) ;
$ ret = ioctl ( $ fh , $ SG_IO , $ packet ) ;
if ( ! $ ret ) {
die "scsi ioctl SG_IO failed - $!\n" if ! $ noerr ;
return undef ;
}
2012-05-30 14:08:33 +04:00
2012-03-19 13:32:52 +04:00
my @ res = unpack ( $ sg_io_hdr_t , $ packet ) ;
if ( $ res [ 17 ] || $ res [ 18 ] ) {
die "scsi ioctl SG_IO status error - $!\n" if ! $ noerr ;
return undef ;
}
my $ res = { } ;
2013-07-15 15:19:54 +04:00
( my $ byte0 , my $ byte1 , $ res - > { vendor } ,
2012-03-19 13:32:52 +04:00
$ res - > { product } , $ res - > { revision } ) = unpack ( "C C x6 A8 A16 A4" , $ buf ) ;
2013-07-15 15:19:54 +04:00
$ res - > { removable } = $ byte1 & 128 ? 1 : 0 ;
$ res - > { type } = $ byte0 & 31 ;
2012-03-19 13:32:52 +04:00
return $ res ;
}
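# probe a path with a SCSI INQUIRY to detect SCSI (generic) devices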
sub path_is_scsi {
my ( $ path ) = @ _ ;
my $ fh = IO::File - > new ( "+<$path" ) || return undef ;
my $ res = scsi_inquiry ( $ fh , 1 ) ;
close ( $ fh ) ;
return $ res ;
}
2014-06-18 08:54:45 +04:00
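# build the qemu '-device' argument for the USB tablet (pointer) device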
sub print_tabletdevice_full {
2018-11-12 16:10:42 +03:00
my ( $ conf , $ arch ) = @ _ ;
2014-11-10 08:31:08 +03:00
2019-11-19 14:23:48 +03:00
my $ q35 = PVE::QemuServer::Machine:: machine_type_is_q35 ( $ conf ) ;
2014-06-18 08:54:45 +04:00
# we use uhci for old VMs because the tablet driver was buggy in older qemu
2018-11-12 16:10:42 +03:00
my $ usbbus ;
2019-11-19 14:23:48 +03:00
if ( $ q35 || $ arch eq 'aarch64' ) {
2018-11-12 16:10:42 +03:00
$ usbbus = 'ehci' ;
} else {
$ usbbus = 'uhci' ;
}
2014-11-10 08:31:08 +03:00
2014-06-18 08:54:45 +04:00
return "usb-tablet,id=tablet,bus=$usbbus.0,port=1" ;
}
2018-11-12 16:10:42 +03:00
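# aarch64 machines have no PS/2 controller, so provide a USB keyboard instead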
sub print_keyboarddevice_full {
my ( $ conf , $ arch , $ machine ) = @ _ ;
return undef if $ arch ne 'aarch64' ;
return "usb-kbd,id=keyboard,bus=ehci.0,port=2" ;
}
2011-09-07 17:34:38 +04:00
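# build the controller-specific qemu '-device' argument for a drive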
sub print_drivedevice_full {
2018-11-12 16:10:42 +03:00
my ( $ storecfg , $ conf , $ vmid , $ drive , $ bridges , $ arch , $ machine_type ) = @ _ ;
2011-09-07 17:34:38 +04:00
my $ device = '' ;
my $ maxdev = 0 ;
2011-09-12 14:26:00 +04:00
2011-09-07 17:34:38 +04:00
if ( $ drive - > { interface } eq 'virtio' ) {
2018-11-12 16:10:42 +03:00
my $ pciaddr = print_pci_addr ( "$drive->{interface}$drive->{index}" , $ bridges , $ arch , $ machine_type ) ;
2011-12-07 14:41:27 +04:00
$ device = "virtio-blk-pci,drive=drive-$drive->{interface}$drive->{index},id=$drive->{interface}$drive->{index}$pciaddr" ;
2015-03-19 13:06:11 +03:00
$ device . = ",iothread=iothread-$drive->{interface}$drive->{index}" if $ drive - > { iothread } ;
2011-12-07 14:41:27 +04:00
} elsif ( $ drive - > { interface } eq 'scsi' ) {
2015-03-27 05:41:52 +03:00
2015-03-27 05:41:54 +03:00
my ( $ maxdev , $ controller , $ controller_prefix ) = scsihw_infos ( $ conf , $ drive ) ;
2011-12-07 14:41:27 +04:00
my $ unit = $ drive - > { index } % $ maxdev ;
my $ devicetype = 'hd' ;
2016-02-25 13:43:01 +03:00
my $ path = '' ;
if ( drive_is_cdrom ( $ drive ) ) {
$ devicetype = 'cd' ;
2013-07-15 15:11:28 +04:00
} else {
2016-02-25 13:43:01 +03:00
if ( $ drive - > { file } =~ m | ^ / | ) {
$ path = $ drive - > { file } ;
if ( my $ info = path_is_scsi ( $ path ) ) {
2017-01-24 13:25:52 +03:00
if ( $ info - > { type } == 0 && $ drive - > { scsiblock } ) {
2016-02-25 13:43:01 +03:00
$ devicetype = 'block' ;
} elsif ( $ info - > { type } == 1 ) { # tape
$ devicetype = 'generic' ;
}
}
} else {
$ path = PVE::Storage:: path ( $ storecfg , $ drive - > { file } ) ;
}
2019-10-23 12:39:03 +03:00
# for compatibility only, we prefer scsi-hd (#2408, #2355, #2380)
# QEMU >= 4.1.1 accepts an extra, internal "pve-version" component in the
# machine type; QEMU splits it off and ignores it, but reports it via the
# QMP 'query-machines' call. This lets us roll out guest HW layout changes
# without waiting for an upstream machine version bump.
2019-11-25 13:18:13 +03:00
my $ version = PVE::QemuServer::Machine:: extract_version ( $ machine_type , kvm_user_version ( ) ) ;
2019-10-23 12:39:03 +03:00
if ( $ path =~ m/^iscsi\:\/\// &&
2019-11-19 14:23:49 +03:00
! min_version ( $ version , 4 , 1 ) ) {
2016-02-25 13:43:01 +03:00
$ devicetype = 'generic' ;
}
}
if ( ! $ conf - > { scsihw } || ( $ conf - > { scsihw } =~ m/^lsi/ ) ) {
$ device = "scsi-$devicetype,bus=$controller_prefix$controller.0,scsi-id=$unit,drive=drive-$drive->{interface}$drive->{index},id=$drive->{interface}$drive->{index}" ;
} else {
$ device = "scsi-$devicetype,bus=$controller_prefix$controller.0,channel=0,scsi-id=0,lun=$drive->{index},drive=drive-$drive->{interface}$drive->{index},id=$drive->{interface}$drive->{index}" ;
}
2012-07-30 16:58:40 +04:00
# the 'ssd' flag exposes a drive as non-rotational (rotation_rate=1), which
# enables TRIM and SSD optimizations in guests limited to emulated
# controllers (IDE, AHCI, non-VirtIO SCSI)
2018-10-28 23:41:46 +03:00
if ( $ drive - > { ssd } && ( $ devicetype eq 'block' || $ devicetype eq 'hd' ) ) {
$ device . = ",rotation_rate=1" ;
}
2019-02-25 19:30:48 +03:00
$ device . = ",wwn=$drive->{wwn}" if $ drive - > { wwn } ;
2018-10-28 23:41:46 +03:00
} elsif ( $ drive - > { interface } eq 'ide' || $ drive - > { interface } eq 'sata' ) {
2020-03-02 13:33:44 +03:00
my $ maxdev = ( $ drive - > { interface } eq 'sata' ) ? $ PVE:: QemuServer:: Drive:: MAX_SATA_DISKS : 2 ;
2011-12-07 14:41:27 +04:00
my $ controller = int ( $ drive - > { index } / $ maxdev ) ;
my $ unit = $ drive - > { index } % $ maxdev ;
my $ devicetype = ( $ drive - > { media } && $ drive - > { media } eq 'cdrom' ) ? "cd" : "hd" ;
2018-10-28 23:41:46 +03:00
$ device = "ide-$devicetype" ;
if ( $ drive - > { interface } eq 'ide' ) {
$ device . = ",bus=ide.$controller,unit=$unit" ;
} else {
$ device . = ",bus=ahci$controller.$unit" ;
}
$ device . = ",drive=drive-$drive->{interface}$drive->{index},id=$drive->{interface}$drive->{index}" ;
if ( $ devicetype eq 'hd' ) {
if ( my $ model = $ drive - > { model } ) {
$ model = URI::Escape:: uri_unescape ( $ model ) ;
$ device . = ",model=$model" ;
}
if ( $ drive - > { ssd } ) {
$ device . = ",rotation_rate=1" ;
}
2015-09-30 11:23:27 +03:00
}
2019-02-25 19:30:48 +03:00
$ device . = ",wwn=$drive->{wwn}" if $ drive - > { wwn } ;
2011-12-07 14:41:27 +04:00
} elsif ( $ drive - > { interface } eq 'usb' ) {
die "implement me" ;
# -device ide-drive,bus=ide.1,unit=0,drive=drive-ide0-1-0,id=ide0-1-0
} else {
die "unsupported interface type" ;
2011-09-07 17:34:38 +04:00
}
2011-12-07 14:54:31 +04:00
$ device . = ",bootindex=$drive->{bootindex}" if $ drive - > { bootindex } ;
2018-04-05 11:54:41 +03:00
if ( my $ serial = $ drive - > { serial } ) {
$ serial = URI::Escape:: uri_unescape ( $ serial ) ;
$ device . = ",serial=$serial" ;
}
2011-09-07 17:34:38 +04:00
return $ device ;
}
2014-05-13 05:10:40 +04:00
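# read the host's iSCSI initiator name from /etc/iscsi/initiatorname.iscsi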
sub get_initiator_name {
2014-05-17 11:07:18 +04:00
my $ initiator ;
2014-05-13 05:10:40 +04:00
2014-05-17 11:07:18 +04:00
my $ fh = IO::File - > new ( '/etc/iscsi/initiatorname.iscsi' ) || return undef ;
while ( defined ( my $ line = <$fh> ) ) {
next if $ line !~ m/^\s*InitiatorName\s*=\s*([\.\-:\w]+)/ ;
2014-05-13 05:10:40 +04:00
$ initiator = $ 1 ;
last ;
}
2014-05-17 11:07:18 +04:00
$ fh - > close ( ) ;
2014-05-13 05:10:40 +04:00
return $ initiator ;
}
2020-03-02 13:33:45 +03:00
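# build the qemu '-drive' argument (backing file, format, cache, aio and throttling options)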
sub print_drive_commandline_full {
2011-08-23 09:47:04 +04:00
my ( $ storecfg , $ vmid , $ drive ) = @ _ ;
2015-06-10 11:22:42 +03:00
my $ path ;
my $ volid = $ drive - > { file } ;
my $ format ;
2019-05-03 15:22:38 +03:00
2015-06-10 11:22:42 +03:00
if ( drive_is_cdrom ( $ drive ) ) {
$ path = get_iso_path ( $ storecfg , $ vmid , $ volid ) ;
} else {
my ( $ storeid , $ volname ) = PVE::Storage:: parse_volume_id ( $ volid , 1 ) ;
if ( $ storeid ) {
$ path = PVE::Storage:: path ( $ storecfg , $ volid ) ;
my $ scfg = PVE::Storage:: storage_config ( $ storecfg , $ storeid ) ;
$ format = qemu_img_format ( $ scfg , $ volname ) ;
} else {
$ path = $ volid ;
2016-01-25 17:13:36 +03:00
$ format = "raw" ;
2015-06-10 11:22:42 +03:00
}
}
2011-08-23 09:47:04 +04:00
my $ opts = '' ;
2018-02-08 14:09:23 +03:00
my @ qemu_drive_options = qw( heads secs cyls trans media format cache rerror werror aio discard ) ;
2011-08-23 09:47:04 +04:00
foreach my $ o ( @ qemu_drive_options ) {
2018-02-08 14:09:24 +03:00
$ opts . = ",$o=$drive->{$o}" if defined ( $ drive - > { $ o } ) ;
2011-09-12 14:26:00 +04:00
}
2018-02-08 14:09:23 +03:00
# snapshot only accepts on|off
if ( defined ( $ drive - > { snapshot } ) ) {
my $ v = $ drive - > { snapshot } ? 'on' : 'off' ;
$ opts . = ",snapshot=$v" ;
}
2017-05-30 16:30:15 +03:00
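# map the mbps*/iops* drive limits to qemu 'throttling.*' properties;
# mbps values are converted to bytes per second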
foreach my $ type ( [ '' , '-total' ] , [ _rd = > '-read' ] , [ _wr = > '-write' ] ) {
my ( $ dir , $ qmpname ) = @$ type ;
if ( my $ v = $ drive - > { "mbps$dir" } ) {
$ opts . = ",throttling.bps$qmpname=" . int ( $ v * 1024 * 1024 ) ;
}
if ( my $ v = $ drive - > { "mbps${dir}_max" } ) {
$ opts . = ",throttling.bps$qmpname-max=" . int ( $ v * 1024 * 1024 ) ;
}
if ( my $ v = $ drive - > { "bps${dir}_max_length" } ) {
$ opts . = ",throttling.bps$qmpname-max-length=$v" ;
}
if ( my $ v = $ drive - > { "iops${dir}" } ) {
$ opts . = ",throttling.iops$qmpname=$v" ;
}
if ( my $ v = $ drive - > { "iops${dir}_max" } ) {
2017-07-14 15:36:16 +03:00
$ opts . = ",throttling.iops$qmpname-max=$v" ;
2017-05-30 16:30:15 +03:00
}
if ( my $ v = $ drive - > { "iops${dir}_max_length" } ) {
2017-07-14 15:36:16 +03:00
$ opts . = ",throttling.iops$qmpname-max-length=$v" ;
2017-05-30 16:30:15 +03:00
}
}
2015-06-10 11:22:42 +03:00
$ opts . = ",format=$format" if $ format && ! $ drive - > { format } ;
2015-06-02 17:04:26 +03:00
my $ cache_direct = 0 ;
if ( my $ cache = $ drive - > { cache } ) {
$ cache_direct = $ cache =~ /^(?:off|none|directsync)$/ ;
} elsif ( ! drive_is_cdrom ( $ drive ) ) {
$ opts . = ",cache=none" ;
$ cache_direct = 1 ;
}
# aio native works only with O_DIRECT
if ( ! $ drive - > { aio } ) {
if ( $ cache_direct ) {
$ opts . = ",aio=native" ;
} else {
$ opts . = ",aio=threads" ;
}
}
2013-02-22 17:35:51 +04:00
2015-12-17 16:58:12 +03:00
if ( ! drive_is_cdrom ( $ drive ) ) {
my $ detectzeroes ;
2016-01-29 12:08:08 +03:00
if ( defined ( $ drive - > { detect_zeroes } ) && ! $ drive - > { detect_zeroes } ) {
2015-12-17 16:58:12 +03:00
$ detectzeroes = 'off' ;
} elsif ( $ drive - > { discard } ) {
$ detectzeroes = $ drive - > { discard } eq 'on' ? 'unmap' : 'on' ;
} else {
# This used to be our default with discard not being specified:
$ detectzeroes = 'on' ;
}
$ opts . = ",detect-zeroes=$detectzeroes" if $ detectzeroes ;
}
2014-10-13 11:45:30 +04:00
2011-08-23 09:47:04 +04:00
my $ pathinfo = $ path ? "file=$path," : '' ;
2011-09-07 17:34:37 +04:00
return "${pathinfo}if=none,id=drive-$drive->{interface}$drive->{index}$opts" ;
2011-08-23 09:47:04 +04:00
}
2012-01-28 14:02:28 +04:00
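# build the qemu '-device' argument for a network card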
sub print_netdevice_full {
2018-11-12 16:10:42 +03:00
my ( $ vmid , $ conf , $ net , $ netid , $ bridges , $ use_old_bios_files , $ arch , $ machine_type ) = @ _ ;
2012-01-28 14:02:28 +04:00
my $ bootorder = $ conf - > { boot } || $ confdesc - > { boot } - > { default } ;
my $ device = $ net - > { model } ;
if ( $ net - > { model } eq 'virtio' ) {
$ device = 'virtio-net-pci' ;
} ;
2018-11-12 16:10:42 +03:00
my $ pciaddr = print_pci_addr ( "$netid" , $ bridges , $ arch , $ machine_type ) ;
2015-01-09 08:56:14 +03:00
my $ tmpstr = "$device,mac=$net->{macaddr},netdev=$netid$pciaddr,id=$netid" ;
2014-06-10 10:30:31 +04:00
if ( $ net - > { queues } && $ net - > { queues } > 1 && $ net - > { model } eq 'virtio' ) {
# with N queues, virtio-net needs 2*N + 2 vectors (one RX/TX pair per queue, plus one for config changes and one for the control vq)
my $ vectors = $ net - > { queues } * 2 + 2 ;
$ tmpstr . = ",vectors=$vectors,mq=on" ;
}
2012-01-28 14:02:28 +04:00
$ tmpstr . = ",bootindex=$net->{bootindex}" if $ net - > { bootindex } ;
2015-10-23 11:41:53 +03:00
if ( $ use_old_bios_files ) {
my $ romfile ;
if ( $ device eq 'virtio-net-pci' ) {
$ romfile = 'pxe-virtio.rom' ;
} elsif ( $ device eq 'e1000' ) {
$ romfile = 'pxe-e1000.rom' ;
} elsif ( $ device eq 'ne2k' ) {
$ romfile = 'pxe-ne2k_pci.rom' ;
} elsif ( $ device eq 'pcnet' ) {
$ romfile = 'pxe-pcnet.rom' ;
} elsif ( $ device eq 'rtl8139' ) {
$ romfile = 'pxe-rtl8139.rom' ;
}
$ tmpstr . = ",romfile=$romfile" if $ romfile ;
}
2012-01-28 14:02:28 +04:00
return $ tmpstr ;
}
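# build the qemu '-netdev' argument (tap backend when bridged, user backend otherwise)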
sub print_netdev_full {
2018-11-12 16:10:42 +03:00
my ( $ vmid , $ conf , $ arch , $ net , $ netid , $ hotplug ) = @ _ ;
2012-01-28 14:02:28 +04:00
my $ i = '' ;
if ( $ netid =~ m/^net(\d+)$/ ) {
$ i = int ( $ 1 ) ;
}
die "got strange net id '$i'\n" if $ i >= $ { MAX_NETS } ;
my $ ifname = "tap${vmid}i$i" ;
# kvm uses TUNSETIFF ioctl, and that limits ifname length
die "interface name '$ifname' is too long (max 15 character)\n"
if length ( $ ifname ) >= 16 ;
my $ vhostparam = '' ;
2018-11-12 16:10:40 +03:00
if ( is_native ( $ arch ) ) {
2018-12-20 12:44:13 +03:00
$ vhostparam = ',vhost=on' if kernel_has_vhost_net ( ) && $ net - > { model } eq 'virtio' ;
2018-11-12 16:10:40 +03:00
}
2012-01-28 14:02:28 +04:00
my $ vmname = $ conf - > { name } || "vm$vmid" ;
2014-06-10 10:30:31 +04:00
my $ netdev = "" ;
2015-11-06 17:05:59 +03:00
my $ script = $ hotplug ? "pve-bridge-hotplug" : "pve-bridge" ;
2014-06-10 10:30:31 +04:00
2012-01-28 14:02:28 +04:00
if ( $ net - > { bridge } ) {
2015-11-06 17:05:59 +03:00
$ netdev = "type=tap,id=$netid,ifname=${ifname},script=/var/lib/qemu-server/$script,downscript=/var/lib/qemu-server/pve-bridgedown$vhostparam" ;
2012-01-28 14:02:28 +04:00
} else {
2014-06-10 10:30:31 +04:00
$ netdev = "type=user,id=$netid,hostname=$vmname" ;
2012-01-28 14:02:28 +04:00
}
2014-06-10 10:30:31 +04:00
$ netdev . = ",queues=$net->{queues}" if ( $ net - > { queues } && $ net - > { model } eq 'virtio' ) ;
return $ netdev ;
2012-01-28 14:02:28 +04:00
}
2011-08-23 09:47:04 +04:00
2018-11-09 15:31:09 +03:00
my $ vga_map = {
'cirrus' = > 'cirrus-vga' ,
'std' = > 'VGA' ,
'vmware' = > 'vmware-svga' ,
'virtio' = > 'virtio-vga' ,
} ;
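# build the qemu '-device' argument for a display adapter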
sub print_vga_device {
2019-11-19 14:23:49 +03:00
my ( $ conf , $ vga , $ arch , $ machine_version , $ machine , $ id , $ qxlnum , $ bridges ) = @ _ ;
2018-11-09 15:31:09 +03:00
my $ type = $ vga_map - > { $ vga - > { type } } ;
2018-12-17 11:19:58 +03:00
if ( $ arch eq 'aarch64' && defined ( $ type ) && $ type eq 'virtio-vga' ) {
2018-11-12 16:10:42 +03:00
$ type = 'virtio-gpu' ;
}
2018-11-09 15:31:09 +03:00
my $ vgamem_mb = $ vga - > { memory } ;
2019-11-19 18:18:19 +03:00
my $ max_outputs = '' ;
2018-11-09 15:31:09 +03:00
if ( $ qxlnum ) {
$ type = $ id ? 'qxl' : 'qxl-vga' ;
2019-11-19 18:18:19 +03:00
2019-11-20 17:10:16 +03:00
if ( ! $ conf - > { ostype } || $ conf - > { ostype } =~ m/^(?:l\d\d|other)$/ ) {
2019-11-19 18:18:19 +03:00
# set max outputs so linux can have up to 4 qxl displays with one device
2019-11-19 14:23:49 +03:00
if ( min_version ( $ machine_version , 4 , 1 ) ) {
2019-11-20 17:31:16 +03:00
$ max_outputs = ",max_outputs=4" ;
}
2019-11-19 18:18:19 +03:00
}
2018-11-09 15:31:09 +03:00
}
2019-11-19 18:18:19 +03:00
2018-11-09 15:31:09 +03:00
die "no devicetype for $vga->{type}\n" if ! $ type ;
my $ memory = "" ;
if ( $ vgamem_mb ) {
if ( $ vga - > { type } eq 'virtio' ) {
my $ bytes = PVE::Tools:: convert_size ( $ vgamem_mb , "mb" = > "b" ) ;
$ memory = ",max_hostmem=$bytes" ;
} elsif ( $ qxlnum ) {
# from https://www.spice-space.org/multiple-monitors.html
$ memory = ",vgamem_mb=$vga->{memory}" ;
my $ ram = $ vgamem_mb * 4 ;
my $ vram = $ vgamem_mb * 2 ;
$ memory . = ",ram_size_mb=$ram,vram_size_mb=$vram" ;
} else {
$ memory = ",vgamem_mb=$vga->{memory}" ;
}
} elsif ( $ qxlnum && $ id ) {
$ memory = ",ram_size=67108864,vram_size=33554432" ;
}
2019-11-19 14:23:48 +03:00
my $ q35 = PVE::QemuServer::Machine:: machine_type_is_q35 ( $ conf ) ;
2018-11-09 15:31:09 +03:00
my $ vgaid = "vga" . ( $ id // '' ) ;
my $ pciaddr ;
2018-11-09 16:01:45 +03:00
2018-11-09 15:31:09 +03:00
if ( $ q35 && $ vgaid eq 'vga' ) {
2018-11-09 16:01:45 +03:00
# the first display uses pcie.0 bus on q35 machines
2018-11-12 16:10:42 +03:00
$ pciaddr = print_pcie_addr ( $ vgaid , $ bridges , $ arch , $ machine ) ;
2018-11-09 15:31:09 +03:00
} else {
2018-11-12 16:10:42 +03:00
$ pciaddr = print_pci_addr ( $ vgaid , $ bridges , $ arch , $ machine ) ;
2018-11-09 15:31:09 +03:00
}
2019-11-19 18:18:19 +03:00
return "$type,id=${vgaid}${memory}${max_outputs}${pciaddr}" ;
2018-11-09 15:31:09 +03:00
}
2016-03-30 13:20:10 +03:00
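# parse a semicolon-separated list of numbers/ranges, e.g. '0-1;4' -> [[0,1],[4,undef]]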
sub parse_number_sets {
my ( $ set ) = @ _ ;
my $ res = [] ;
foreach my $ part ( split ( /;/ , $ set ) ) {
if ( $ part =~ /^\s*(\d+)(?:-(\d+))?\s*$/ ) {
die "invalid range: $part ($2 < $1)\n" if defined ( $ 2 ) && $ 2 < $ 1 ;
push @$ res , [ $ 1 , $ 2 ] ;
2014-12-03 18:23:48 +03:00
} else {
2016-03-30 13:20:10 +03:00
die "invalid range: $part\n" ;
2014-12-03 18:23:48 +03:00
}
}
2016-03-30 13:20:10 +03:00
return $ res ;
}
# numaX: cpus=<id[-id]>,memory=<mb>[,hostnodes=<id[-id]>][,policy=<preferred|bind|interleave>]
2014-12-03 18:23:48 +03:00
2016-03-30 13:20:10 +03:00
sub parse_numa {
my ( $ data ) = @ _ ;
my $ res = PVE::JSONSchema:: parse_property_string ( $ numa_fmt , $ data ) ;
$ res - > { cpus } = parse_number_sets ( $ res - > { cpus } ) if defined ( $ res - > { cpus } ) ;
$ res - > { hostnodes } = parse_number_sets ( $ res - > { hostnodes } ) if defined ( $ res - > { hostnodes } ) ;
add custom numa topology support
numaX: cpus=<id[-id],memory=<mb>[[,hostnodes=<id[-id]>][,policy=<preferred|bind|interleave>]]
example:
-------
sockets:4
cores:2
memory:4096
numa: 1
numa0: cpus=0-1,memory=1024,hostnodes=0-1,policy=interleave
numa1: cpus=2-3,memory=3072,hostnodes=2,policy=bind
qemu command line
-----------------
-object memory-backend-ram,size=1024M,policy=interleave,host-nodes=0-1,id=ram-node0
-numa node,nodeid=0,cpus=0-1,memdev=ram-node0
-object memory-backend-ram,size=3072M,policy=bind,host-nodes=2,id=ram-node1
-numa node,nodeid=1,cpus=2-3,memdev=ram-node1
Signed-off-by: Alexandre Derumier <aderumier@odiso.com>
2014-12-03 18:23:48 +03:00
return $ res ;
}
2011-09-11 10:59:59 +04:00
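# parse a hostpciX property string and expand each host ID into its PCI device entries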
sub parse_hostpci {
my ( $ value ) = @ _ ;
return undef if ! $ value ;
2016-03-30 13:20:12 +03:00
my $ res = PVE::JSONSchema:: parse_property_string ( $ hostpci_fmt , $ value ) ;
2013-12-13 14:43:05 +04:00
2016-03-30 13:20:12 +03:00
my @ idlist = split ( /;/ , $ res - > { host } ) ;
delete $ res - > { host } ;
foreach my $ id ( @ idlist ) {
# always verify that the device actually exists (fixes #2510); lspci()
# also normalizes the ID by adding the default '0000' domain if missing
2019-12-09 12:10:04 +03:00
my $ devs = PVE::SysFSTools:: lspci ( $ id ) ;
2019-12-09 13:29:26 +03:00
die "no PCI device found for '$id'\n" if ! scalar ( @$ devs ) ;
2019-12-09 12:10:04 +03:00
push @ { $ res - > { pciid } } , @$ devs ;
2011-09-11 10:59:59 +04:00
}
return $ res ;
}
2011-08-23 09:47:04 +04:00
# netX: e1000=XX:XX:XX:XX:XX:XX,bridge=vmbr0,rate=<mbps>
sub parse_net {
my ( $ data ) = @ _ ;
2016-03-30 13:20:11 +03:00
my $ res = eval { PVE::JSONSchema:: parse_property_string ( $ net_fmt , $ data ) } ;
if ( $@ ) {
warn $@ ;
return undef ;
2011-08-23 09:47:04 +04:00
}
2016-07-13 17:25:44 +03:00
if ( ! defined ( $ res - > { macaddr } ) ) {
my $ dc = PVE::Cluster:: cfs_read_file ( 'datacenter.cfg' ) ;
$ res - > { macaddr } = PVE::Tools:: random_ether_addr ( $ dc - > { mac_prefix } ) ;
}
2015-06-16 15:26:43 +03:00
return $ res ;
}
# ipconfigX: ip=cidr,gw=ip,ip6=cidr,gw6=ip
sub parse_ipconfig {
my ( $ data ) = @ _ ;
my $ res = eval { PVE::JSONSchema:: parse_property_string ( $ ipconfig_fmt , $ data ) } ;
if ( $@ ) {
warn $@ ;
return undef ;
}
if ( $ res - > { gw } && ! $ res - > { ip } ) {
warn 'gateway specified without specifying an IP address' ;
return undef ;
}
if ( $ res - > { gw6 } && ! $ res - > { ip6 } ) {
warn 'IPv6 gateway specified without specifying an IPv6 address' ;
return undef ;
}
if ( $ res - > { gw } && $ res - > { ip } eq 'dhcp' ) {
warn 'gateway specified together with DHCP' ;
return undef ;
}
if ( $ res - > { gw6 } && $ res - > { ip6 } !~ /^$IPV6RE/ ) {
# gw6 + auto/dhcp
warn "IPv6 gateway specified together with $res->{ip6} address" ;
return undef ;
}
if ( ! $ res - > { ip } && ! $ res - > { ip6 } ) {
return { ip = > 'dhcp' , ip6 = > 'dhcp' } ;
}
2011-08-23 09:47:04 +04:00
return $ res ;
}
sub print_net {
my $ net = shift ;
2016-03-30 13:20:11 +03:00
return PVE::JSONSchema:: print_property_string ( $ net , $ net_fmt ) ;
2011-08-23 09:47:04 +04:00
}
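# re-print all netX settings so that missing MAC addresses get generated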
sub add_random_macs {
my ( $ settings ) = @ _ ;
foreach my $ opt ( keys %$ settings ) {
next if $ opt !~ m/^net(\d+)$/ ;
my $ net = parse_net ( $ settings - > { $ opt } ) ;
next if ! $ net ;
$ settings - > { $ opt } = print_net ( $ net ) ;
}
}
2014-11-17 09:08:44 +03:00
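# true if the volume is owned by the given VM (absolute paths are never owned)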
sub vm_is_volid_owner {
my ( $ storecfg , $ vmid , $ volid ) = @ _ ;
if ( $ volid !~ m | ^ / | ) {
my ( $ path , $ owner ) ;
eval { ( $ path , $ owner ) = PVE::Storage:: path ( $ storecfg , $ volid ) ; } ;
if ( $ owner && ( $ owner == $ vmid ) ) {
return 1 ;
}
}
return undef ;
}
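# free cloudinit disks right away; register other owned disks as unusedX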
sub vmconfig_register_unused_drive {
my ( $ storecfg , $ vmid , $ conf , $ drive ) = @ _ ;
2016-04-04 11:04:10 +03:00
if ( drive_is_cloudinit ( $ drive ) ) {
eval { PVE::Storage:: vdisk_free ( $ storecfg , $ drive - > { file } ) } ;
warn $@ if $@ ;
} elsif ( ! drive_is_cdrom ( $ drive ) ) {
2014-11-17 09:08:44 +03:00
my $ volid = $ drive - > { file } ;
if ( vm_is_volid_owner ( $ storecfg , $ vmid , $ volid ) ) {
2016-03-07 14:41:14 +03:00
PVE::QemuConfig - > add_unused_volume ( $ conf , $ volid , $ vmid ) ;
2014-11-17 09:08:44 +03:00
}
}
}
2019-06-11 13:13:52 +03:00
# smbios: [manufacturer=str][,product=str][,version=str][,serial=str][,uuid=uuid][,sku=str][,family=str][,base64=bool]
2016-03-30 13:20:07 +03:00
my $ smbios1_fmt = {
2016-01-14 15:33:55 +03:00
uuid = > {
type = > 'string' ,
pattern = > '[a-fA-F0-9]{8}(?:-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}' ,
format_description = > 'UUID' ,
2016-05-19 14:13:25 +03:00
description = > "Set SMBIOS1 UUID." ,
2016-01-14 15:33:55 +03:00
optional = > 1 ,
} ,
version = > {
type = > 'string' ,
2019-06-11 13:13:52 +03:00
pattern = > '[A-Za-z0-9+\/]+={0,2}' ,
format_description = > 'Base64 encoded string' ,
2016-05-19 14:13:25 +03:00
description = > "Set SMBIOS1 version." ,
2016-01-14 15:33:55 +03:00
optional = > 1 ,
} ,
serial = > {
type = > 'string' ,
2019-06-11 13:13:52 +03:00
pattern = > '[A-Za-z0-9+\/]+={0,2}' ,
format_description = > 'Base64 encoded string' ,
2016-05-19 14:13:25 +03:00
description = > "Set SMBIOS1 serial number." ,
2016-01-14 15:33:55 +03:00
optional = > 1 ,
} ,
manufacturer = > {
type = > 'string' ,
2019-06-11 13:13:52 +03:00
pattern = > '[A-Za-z0-9+\/]+={0,2}' ,
format_description = > 'Base64 encoded string' ,
2016-05-19 14:13:25 +03:00
description = > "Set SMBIOS1 manufacturer." ,
2016-01-14 15:33:55 +03:00
optional = > 1 ,
} ,
product = > {
type = > 'string' ,
2019-06-11 13:13:52 +03:00
pattern = > '[A-Za-z0-9+\/]+={0,2}' ,
format_description = > 'Base64 encoded string' ,
2016-05-19 14:13:25 +03:00
description = > "Set SMBIOS1 product ID." ,
2016-01-14 15:33:55 +03:00
optional = > 1 ,
} ,
sku = > {
type = > 'string' ,
2019-06-11 13:13:52 +03:00
pattern = > '[A-Za-z0-9+\/]+={0,2}' ,
format_description = > 'Base64 encoded string' ,
2016-05-19 14:13:25 +03:00
description = > "Set SMBIOS1 SKU string." ,
2016-01-14 15:33:55 +03:00
optional = > 1 ,
} ,
family = > {
type = > 'string' ,
2019-06-11 13:13:52 +03:00
pattern = > '[A-Za-z0-9+\/]+={0,2}' ,
format_description = > 'Base64 encoded string' ,
2016-05-19 14:13:25 +03:00
description = > "Set SMBIOS1 family string." ,
2016-01-14 15:33:55 +03:00
optional = > 1 ,
} ,
2019-06-11 13:13:52 +03:00
base64 = > {
type = > 'boolean' ,
description = > 'Flag to indicate that the SMBIOS values are base64 encoded' ,
optional = > 1 ,
} ,
2014-06-26 13:12:25 +04:00
} ;
sub parse_smbios1 {
my ( $ data ) = @ _ ;
2016-03-30 13:20:07 +03:00
my $ res = eval { PVE::JSONSchema:: parse_property_string ( $ smbios1_fmt , $ data ) } ;
2016-01-14 15:33:55 +03:00
warn $@ if $@ ;
2014-06-26 13:12:25 +04:00
return $ res ;
}
2014-08-26 11:20:09 +04:00
sub print_smbios1 {
my ( $ smbios1 ) = @ _ ;
2016-03-30 13:20:07 +03:00
return PVE::JSONSchema:: print_property_string ( $ smbios1 , $ smbios1_fmt ) ;
2014-08-26 11:20:09 +04:00
}
2016-03-30 13:20:07 +03:00
PVE::JSONSchema:: register_format ( 'pve-qm-smbios1' , $ smbios1_fmt ) ;
2014-06-26 13:12:25 +04:00
2011-09-08 13:39:56 +04:00
sub parse_watchdog {
my ( $ value ) = @ _ ;
return undef if ! $ value ;
2016-03-30 13:20:13 +03:00
my $ res = eval { PVE::JSONSchema:: parse_property_string ( $ watchdog_fmt , $ value ) } ;
warn $@ if $@ ;
2011-09-08 13:39:56 +04:00
return $ res ;
}
2018-08-01 21:29:04 +03:00
sub parse_guest_agent {
my ( $ value ) = @ _ ;
return { } if ! defined ( $ value - > { agent } ) ;
my $ res = eval { PVE::JSONSchema:: parse_property_string ( $ agent_fmt , $ value - > { agent } ) } ;
warn $@ if $@ ;
# if the agent is disabled ignore the other potentially set properties
return { } if ! $ res - > { enabled } ;
return $ res ;
}
2018-11-09 15:31:09 +03:00
sub parse_vga {
my ( $ value ) = @ _ ;
return { } if ! $ value ;
my $ res = eval { PVE::JSONSchema:: parse_property_string ( $ vga_fmt , $ value ) } ;
warn $@ if $@ ;
return $ res ;
}
# rng0: attach an emulated virtio-rng-pci device fed from one of three
# entropy sources: /dev/urandom (preferred, avoids host entropy
# starvation), /dev/random (blocking) or /dev/hwrng (host hardware RNG
# passthrough). Throughput is limited to max_bytes per period,
# 1 KiB/s by default.
2020-02-20 20:10:44 +03:00
sub parse_rng {
my ( $ value ) = @ _ ;
return undef if ! $ value ;
my $ res = eval { PVE::JSONSchema:: parse_property_string ( $ rng_fmt , $ value ) } ;
warn $@ if $@ ;
return $ res ;
}
2011-08-23 09:47:04 +04:00
PVE::JSONSchema:: register_format ( 'pve-qm-usb-device' , \ & verify_usb_device ) ;
sub verify_usb_device {
my ( $ value , $ noerr ) = @ _ ;
return $ value if parse_usb_device ( $ value ) ;
return undef if $ noerr ;
2011-09-12 14:26:00 +04:00
2011-08-23 09:47:04 +04:00
die "unable to parse usb device\n" ;
}
# add JSON properties for create and set function
sub json_config_properties {
my $ prop = shift ;
foreach my $ opt ( keys %$ confdesc ) {
2018-09-14 15:08:43 +03:00
next if $ opt eq 'parent' || $ opt eq 'snaptime' || $ opt eq 'vmstate' || $ opt eq 'runningmachine' ;
2011-08-23 09:47:04 +04:00
$ prop - > { $ opt } = $ confdesc - > { $ opt } ;
}
return $ prop ;
}
2018-03-07 11:26:33 +03:00
# return copy of $confdesc_cloudinit to generate documentation
sub cloudinit_config_properties {
return dclone ( $ confdesc_cloudinit ) ;
}
2011-08-23 09:47:04 +04:00
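# validate and normalize a single config value against its $confdesc schema entry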
sub check_type {
my ( $ key , $ value ) = @ _ ;
die "unknown setting '$key'\n" if ! $ confdesc - > { $ key } ;
my $ type = $ confdesc - > { $ key } - > { type } ;
2011-09-15 11:11:27 +04:00
if ( ! defined ( $ value ) ) {
2011-08-23 09:47:04 +04:00
die "got undefined value\n" ;
}
if ( $ value =~ m/[\n\r]/ ) {
die "property contains a line feed\n" ;
}
if ( $ type eq 'boolean' ) {
2011-09-12 14:26:00 +04:00
return 1 if ( $ value eq '1' ) || ( $ value =~ m/^(on|yes|true)$/i ) ;
return 0 if ( $ value eq '0' ) || ( $ value =~ m/^(off|no|false)$/i ) ;
die "type check ('boolean') failed - got '$value'\n" ;
2011-08-23 09:47:04 +04:00
} elsif ( $ type eq 'integer' ) {
return int ( $ 1 ) if $ value =~ m/^(\d+)$/ ;
die "type check ('integer') failed - got '$value'\n" ;
2012-12-30 22:03:00 +04:00
} elsif ( $ type eq 'number' ) {
return $ value if $ value =~ m/^(\d+)(\.\d+)?$/ ;
die "type check ('number') failed - got '$value'\n" ;
2011-08-23 09:47:04 +04:00
} elsif ( $ type eq 'string' ) {
if ( my $ fmt = $ confdesc - > { $ key } - > { format } ) {
PVE::JSONSchema:: check_format ( $ fmt , $ value ) ;
2011-09-12 14:26:00 +04:00
return $ value ;
}
2011-08-23 09:47:04 +04:00
$ value =~ s/^\"(.*)\"$/$1/ ;
2011-09-12 14:26:00 +04:00
return $ value ;
2011-08-23 09:47:04 +04:00
} else {
die "internal error"
}
}
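# remove all disks owned by the VM, then delete (or replace) its config file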
sub destroy_vm {
2019-11-08 19:03:28 +03:00
my ( $ storecfg , $ vmid , $ skiplock , $ replacement_conf ) = @ _ ;
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
my $ conf = PVE::QemuConfig - > load_config ( $ vmid ) ;
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > check_lock ( $ conf ) if ! $ skiplock ;
2011-08-23 09:47:04 +04:00
2017-10-13 11:00:53 +03:00
if ( $ conf - > { template } ) {
# check if any base image is still used by a linked clone
foreach_drive ( $ conf , sub {
my ( $ ds , $ drive ) = @ _ ;
return if drive_is_cdrom ( $ drive ) ;
my $ volid = $ drive - > { file } ;
return if ! $ volid || $ volid =~ m | ^ / | ;
die "base volume '$volid' is still in use by linked cloned\n"
if PVE::Storage:: volume_is_base_and_used ( $ storecfg , $ volid ) ;
} ) ;
}
2011-09-12 14:26:00 +04:00
# only remove disks owned by this VM
2011-08-23 09:47:04 +04:00
foreach_drive ( $ conf , sub {
my ( $ ds , $ drive ) = @ _ ;
2015-08-04 15:31:19 +03:00
return if drive_is_cdrom ( $ drive , 1 ) ;
2011-08-23 09:47:04 +04:00
my $ volid = $ drive - > { file } ;
2011-11-25 11:05:36 +04:00
return if ! $ volid || $ volid =~ m | ^ / | ;
2011-08-23 09:47:04 +04:00
2011-09-15 11:11:27 +04:00
my ( $ path , $ owner ) = PVE::Storage:: path ( $ storecfg , $ volid ) ;
2011-11-25 11:05:36 +04:00
return if ! $ path || ! $ owner || ( $ owner != $ vmid ) ;
2011-08-23 09:47:04 +04:00
2019-11-08 17:35:32 +03:00
eval { PVE::Storage:: vdisk_free ( $ storecfg , $ volid ) } ;
2016-12-20 14:30:57 +03:00
warn "Could not remove disk '$volid', check manually: $@" if $@ ;
2011-08-23 09:47:04 +04:00
} ) ;
2011-09-12 14:26:00 +04:00
2011-08-23 09:47:04 +04:00
# also remove unused disk
2019-11-08 17:35:32 +03:00
my $ vmdisks = PVE::Storage:: vdisk_list ( $ storecfg , undef , $ vmid ) ;
PVE::Storage:: foreach_volid ( $ vmdisks , sub {
my ( $ volid , $ sid , $ volname , $ d ) = @ _ ;
eval { PVE::Storage:: vdisk_free ( $ storecfg , $ volid ) } ;
2011-08-23 09:47:04 +04:00
warn $@ if $@ ;
2019-11-08 17:35:32 +03:00
} ) ;
2019-10-25 12:24:01 +03:00
2019-11-08 19:03:28 +03:00
if ( defined $ replacement_conf ) {
2019-11-14 11:49:26 +03:00
PVE::QemuConfig - > write_config ( $ vmid , $ replacement_conf ) ;
2019-10-25 12:24:01 +03:00
} else {
PVE::QemuConfig - > destroy_config ( $ vmid ) ;
}
2011-08-23 09:47:04 +04:00
}
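# parse a raw qemu-server config file into main, [PENDING] and snapshot sections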
sub parse_vm_config {
my ( $ filename , $ raw ) = @ _ ;
return undef if ! defined ( $ raw ) ;
2011-09-07 13:41:34 +04:00
my $ res = {
2012-03-20 15:25:08 +04:00
digest = > Digest::SHA:: sha1_hex ( $ raw ) ,
2012-09-07 13:51:19 +04:00
snapshots = > { } ,
2014-11-11 08:52:10 +03:00
pending = > { } ,
2011-09-07 13:41:34 +04:00
} ;
2011-08-23 09:47:04 +04:00
2011-09-12 14:26:00 +04:00
$ filename =~ m | /qemu-server/ ( \ d + ) \ . conf $|
2011-08-23 09:47:04 +04:00
|| die "got strange filename '$filename'" ;
my $ vmid = $ 1 ;
2012-09-07 13:51:19 +04:00
my $ conf = $ res ;
2015-08-11 12:24:41 +03:00
my $ descr ;
2014-11-11 09:01:01 +03:00
my $ section = '' ;
2012-03-01 11:13:14 +04:00
2012-09-07 13:51:19 +04:00
my @ lines = split ( /\n/ , $ raw ) ;
foreach my $ line ( @ lines ) {
2011-08-23 09:47:04 +04:00
next if $ line =~ m/^\s*$/ ;
2013-07-15 11:13:31 +04:00
2014-10-30 15:40:22 +03:00
if ( $ line =~ m/^\[PENDING\]\s*$/i ) {
2014-11-11 09:01:01 +03:00
$ section = 'pending' ;
2015-08-11 12:24:41 +03:00
if ( defined ( $ descr ) ) {
$ descr =~ s/\s+$// ;
$ conf - > { description } = $ descr ;
}
$ descr = undef ;
2014-11-11 09:01:01 +03:00
$ conf = $ res - > { $ section } = { } ;
2014-10-30 15:40:22 +03:00
next ;
2014-11-11 08:52:10 +03:00
} elsif ( $ line =~ m/^\[([a-z][a-z0-9_\-]+)\]\s*$/i ) {
2014-11-11 09:01:01 +03:00
$ section = $ 1 ;
2015-08-11 12:24:41 +03:00
if ( defined ( $ descr ) ) {
$ descr =~ s/\s+$// ;
$ conf - > { description } = $ descr ;
}
$ descr = undef ;
2014-11-11 09:01:01 +03:00
$ conf = $ res - > { snapshots } - > { $ section } = { } ;
2012-09-07 13:51:19 +04:00
next ;
}
2011-08-23 09:47:04 +04:00
2012-03-01 11:13:14 +04:00
if ( $ line =~ m/^\#(.*)\s*$/ ) {
2015-08-11 12:24:41 +03:00
$ descr = '' if ! defined ( $ descr ) ;
2012-03-01 11:13:14 +04:00
$ descr . = PVE::Tools:: decode_text ( $ 1 ) . "\n" ;
next ;
}
2011-08-23 09:47:04 +04:00
if ( $ line =~ m/^(description):\s*(.*\S)\s*$/ ) {
2015-08-11 12:24:41 +03:00
$ descr = '' if ! defined ( $ descr ) ;
2012-03-01 11:13:14 +04:00
$ descr . = PVE::Tools:: decode_text ( $ 2 ) ;
2012-09-07 13:51:19 +04:00
} elsif ( $ line =~ m/snapstate:\s*(prepare|delete)\s*$/ ) {
$ conf - > { snapstate } = $ 1 ;
2011-08-23 09:47:04 +04:00
} elsif ( $ line =~ m/^(args):\s*(.*\S)\s*$/ ) {
my $ key = $ 1 ;
my $ value = $ 2 ;
2012-09-07 13:51:19 +04:00
$ conf - > { $ key } = $ value ;
2014-11-11 09:40:07 +03:00
} elsif ( $ line =~ m/^delete:\s*(.*\S)\s*$/ ) {
2014-11-11 09:01:01 +03:00
my $ value = $ 1 ;
2014-11-11 09:40:07 +03:00
if ( $ section eq 'pending' ) {
$ conf - > { delete } = $ value ; # we parse this later
} else {
warn "vm $vmid - propertry 'delete' is only allowed in [PENDING]\n" ;
2014-10-30 15:40:22 +03:00
}
2016-04-04 13:15:34 +03:00
} elsif ( $ line =~ m/^([a-z][a-z_]*\d*):\s*(.+?)\s*$/ ) {
2011-08-23 09:47:04 +04:00
my $ key = $ 1 ;
my $ value = $ 2 ;
eval { $ value = check_type ( $ key , $ value ) ; } ;
if ( $@ ) {
warn "vm $vmid - unable to parse value of '$key' - $@" ;
} else {
2016-10-18 11:38:58 +03:00
$ key = 'ide2' if $ key eq 'cdrom' ;
2011-08-23 09:47:04 +04:00
my $ fmt = $ confdesc - > { $ key } - > { format } ;
2016-10-18 11:38:58 +03:00
if ( $ fmt && $ fmt =~ /^pve-qm-(?:ide|scsi|virtio|sata)$/ ) {
2011-08-23 09:47:04 +04:00
my $ v = parse_drive ( $ key , $ value ) ;
if ( my $ volid = filename_to_volume_id ( $ vmid , $ v - > { file } , $ v - > { media } ) ) {
$ v - > { file } = $ volid ;
2019-12-05 18:11:01 +03:00
$ value = print_drive ( $ v ) ;
2011-08-23 09:47:04 +04:00
} else {
warn "vm $vmid - unable to parse value of '$key'\n" ;
next ;
}
}
2016-10-18 11:38:58 +03:00
$ conf - > { $ key } = $ value ;
2011-08-23 09:47:04 +04:00
}
}
}
2015-08-11 12:24:41 +03:00
if ( defined ( $ descr ) ) {
$ descr =~ s/\s+$// ;
$ conf - > { description } = $ descr ;
}
2012-09-07 13:51:19 +04:00
delete $ res - > { snapstate } ; # just to be sure
2011-08-23 09:47:04 +04:00
return $ res ;
}
2012-02-02 17:01:08 +04:00
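# serialize a config hash (including pending and snapshot sections) back to raw text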
sub write_vm_config {
my ( $ filename , $ conf ) = @ _ ;
2011-08-23 09:47:04 +04:00
2012-09-07 13:51:19 +04:00
delete $ conf - > { snapstate } ; # just to be sure
2012-02-02 17:01:08 +04:00
if ( $ conf - > { cdrom } ) {
die "option ide2 conflicts with cdrom\n" if $ conf - > { ide2 } ;
$ conf - > { ide2 } = $ conf - > { cdrom } ;
delete $ conf - > { cdrom } ;
}
2011-08-23 09:47:04 +04:00
# we do not use 'smp' any longer
2012-02-02 17:01:08 +04:00
if ( $ conf - > { sockets } ) {
delete $ conf - > { smp } ;
} elsif ( $ conf - > { smp } ) {
$ conf - > { sockets } = $ conf - > { smp } ;
delete $ conf - > { cores } ;
delete $ conf - > { smp } ;
2011-08-23 09:47:04 +04:00
}
2012-09-10 13:49:32 +04:00
my $ used_volids = { } ;
2012-09-07 13:51:19 +04:00
2012-09-10 13:49:32 +04:00
my $ cleanup_config = sub {
2014-11-11 09:40:07 +03:00
my ( $ cref , $ pending , $ snapname ) = @ _ ;
2012-02-02 17:01:08 +04:00
2012-09-10 13:49:32 +04:00
foreach my $ key ( keys %$ cref ) {
next if $ key eq 'digest' || $ key eq 'description' || $ key eq 'snapshots' ||
2014-11-11 09:40:07 +03:00
$ key eq 'snapstate' || $ key eq 'pending' ;
2012-09-10 13:49:32 +04:00
my $ value = $ cref - > { $ key } ;
2014-11-11 09:40:07 +03:00
if ( $ key eq 'delete' ) {
die "propertry 'delete' is only allowed in [PENDING]\n"
if ! $ pending ;
# fixme: check syntax?
next ;
}
2012-09-10 13:49:32 +04:00
eval { $ value = check_type ( $ key , $ value ) ; } ;
die "unable to parse value of '$key' - $@" if $@ ;
2012-02-02 17:01:08 +04:00
2012-09-10 13:49:32 +04:00
$ cref - > { $ key } = $ value ;
2016-03-03 17:45:15 +03:00
if ( ! $ snapname && is_valid_drivename ( $ key ) ) {
2013-01-04 09:57:11 +04:00
my $ drive = parse_drive ( $ key , $ value ) ;
2012-09-10 13:49:32 +04:00
$ used_volids - > { $ drive - > { file } } = 1 if $ drive && $ drive - > { file } ;
}
2011-08-23 09:47:04 +04:00
}
2012-09-10 13:49:32 +04:00
} ;
& $ cleanup_config ( $ conf ) ;
2014-11-11 09:40:07 +03:00
& $ cleanup_config ( $ conf - > { pending } , 1 ) ;
2012-09-10 13:49:32 +04:00
foreach my $ snapname ( keys % { $ conf - > { snapshots } } ) {
2019-10-24 14:53:09 +03:00
die "internal error: snapshot name '$snapname' is forbidden" if lc ( $ snapname ) eq 'pending' ;
2014-11-11 09:40:07 +03:00
& $ cleanup_config ( $ conf - > { snapshots } - > { $ snapname } , undef , $ snapname ) ;
2011-08-23 09:47:04 +04:00
}
2012-02-02 17:01:08 +04:00
# remove 'unusedX' settings if we re-add a volume
foreach my $ key ( keys %$ conf ) {
my $ value = $ conf - > { $ key } ;
2012-09-10 13:49:32 +04:00
if ( $ key =~ m/^unused/ && $ used_volids - > { $ value } ) {
2012-02-02 17:01:08 +04:00
delete $ conf - > { $ key } ;
2011-08-23 09:47:04 +04:00
}
2012-02-02 17:01:08 +04:00
}
2013-07-15 11:13:31 +04:00
2012-09-07 13:51:19 +04:00
my $ generate_raw_config = sub {
2015-08-11 12:24:41 +03:00
my ( $ conf , $ pending ) = @ _ ;
2012-03-01 11:13:14 +04:00
2012-09-07 13:51:19 +04:00
my $ raw = '' ;
# add description as comment to top of file
2015-08-11 12:24:41 +03:00
if ( defined ( my $ descr = $ conf - > { description } ) ) {
if ( $ descr ) {
foreach my $ cl ( split ( /\n/ , $ descr ) ) {
$ raw . = '#' . PVE::Tools:: encode_text ( $ cl ) . "\n" ;
}
} else {
$ raw . = "#\n" if $ pending ;
}
2012-09-07 13:51:19 +04:00
}
foreach my $ key ( sort keys %$ conf ) {
2014-11-11 09:40:07 +03:00
next if $ key eq 'digest' || $ key eq 'description' || $ key eq 'pending' || $ key eq 'snapshots' ;
2012-09-07 13:51:19 +04:00
$ raw . = "$key: $conf->{$key}\n" ;
}
return $ raw ;
} ;
2012-03-01 11:13:14 +04:00
2012-09-07 13:51:19 +04:00
my $ raw = & $ generate_raw_config ( $ conf ) ;
2014-11-11 09:40:07 +03:00
if ( scalar ( keys % { $ conf - > { pending } } ) ) {
$ raw . = "\n[PENDING]\n" ;
2015-08-11 12:24:41 +03:00
$ raw . = & $ generate_raw_config ( $ conf - > { pending } , 1 ) ;
2014-11-11 09:40:07 +03:00
}
2012-09-07 13:51:19 +04:00
foreach my $ snapname ( sort keys % { $ conf - > { snapshots } } ) {
$ raw . = "\n[$snapname]\n" ;
$ raw . = & $ generate_raw_config ( $ conf - > { snapshots } - > { $ snapname } ) ;
2012-02-02 17:01:08 +04:00
}
2011-08-23 09:47:04 +04:00
2012-02-02 17:01:08 +04:00
return $ raw ;
}
2011-08-23 09:47:04 +04:00
2011-09-12 14:26:00 +04:00
sub load_defaults {
2011-08-23 09:47:04 +04:00
my $ res = { } ;
# we use static defaults from our JSON schema configuration
foreach my $ key ( keys %$ confdesc ) {
if ( defined ( my $ default = $ confdesc - > { $ key } - > { default } ) ) {
$ res - > { $ key } = $ default ;
}
}
2011-09-12 14:26:00 +04:00
2011-08-23 09:47:04 +04:00
return $ res ;
}
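# return the VMIDs of all qemu guests registered on this node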
sub config_list {
my $ vmlist = PVE::Cluster:: get_vmlist ( ) ;
my $ res = { } ;
return $ res if ! $ vmlist || ! $ vmlist - > { ids } ;
my $ ids = $ vmlist - > { ids } ;
2019-12-10 13:05:39 +03:00
my $ nodename = nodename ( ) ;
2011-08-23 09:47:04 +04:00
foreach my $ vmid ( keys %$ ids ) {
my $ d = $ ids - > { $ vmid } ;
next if ! $ d - > { node } || $ d - > { node } ne $ nodename ;
2011-09-26 14:20:05 +04:00
next if ! $ d - > { type } || $ d - > { type } ne 'qemu' ;
2011-08-23 09:47:04 +04:00
$ res - > { $ vmid } - > { exists } = 1 ;
}
return $ res ;
}
2011-09-09 14:13:21 +04:00
# test if VM uses local resources (to prevent migration)
sub check_local_resources {
my ( $ conf , $ noerr ) = @ _ ;
2019-05-03 15:22:39 +03:00
my @ loc_res = ( ) ;
2011-09-12 14:26:00 +04:00
2019-05-03 15:22:39 +03:00
push @ loc_res , "hostusb" if $ conf - > { hostusb } ; # old syntax
push @ loc_res , "hostpci" if $ conf - > { hostpci } ; # old syntax
2011-09-09 14:13:21 +04:00
2019-05-03 15:22:39 +03:00
push @ loc_res , "ivshmem" if $ conf - > { ivshmem } ;
2019-02-22 13:38:33 +03:00
2011-09-09 16:18:11 +04:00
foreach my $ k ( keys %$ conf ) {
2019-09-11 15:43:33 +03:00
next if $ k =~ m/^usb/ && ( $ conf - > { $ k } =~ m/^spice(?![^,])/ ) ;
2015-11-09 13:32:02 +03:00
# sockets are safe: they will be recreated on the target side post-migrate
next if $ k =~ m/^serial/ && ( $ conf - > { $ k } eq 'socket' ) ;
2019-05-03 15:22:39 +03:00
push @ loc_res , $ k if $ k =~ m/^(usb|hostpci|serial|parallel)\d+$/ ;
2011-09-09 14:13:21 +04:00
}
2019-05-03 15:22:39 +03:00
die "VM uses local resources\n" if scalar @ loc_res && ! $ noerr ;
2011-09-09 14:13:21 +04:00
2019-05-03 15:22:39 +03:00
return \ @ loc_res ;
2011-09-09 14:13:21 +04:00
}
2013-05-06 10:56:17 +04:00
# check if used storages are available on all nodes (used by migrate)
2012-03-30 11:13:31 +04:00
sub check_storage_availability {
my ( $ storecfg , $ conf , $ node ) = @ _ ;
foreach_drive ( $ conf , sub {
my ( $ ds , $ drive ) = @ _ ;
my $ volid = $ drive - > { file } ;
return if ! $ volid ;
my ( $ sid , $ volname ) = PVE::Storage:: parse_volume_id ( $ volid , 1 ) ;
return if ! $ sid ;
# check if storage is available on both nodes
my $ scfg = PVE::Storage:: storage_check_node ( $ storecfg , $ sid ) ;
PVE::Storage:: storage_check_node ( $ storecfg , $ sid , $ node ) ;
} ) ;
}
2013-05-06 10:56:17 +04:00
# list nodes where all VM images are available (used by has_feature API)
sub shared_nodes {
my ( $ conf , $ storecfg ) = @ _ ;
my $ nodelist = PVE::Cluster:: get_nodelist ( ) ;
my $ nodehash = { map { $ _ = > 1 } @$ nodelist } ;
2019-12-10 13:05:39 +03:00
my $ nodename = nodename ( ) ;
2013-07-15 11:13:31 +04:00
2013-05-06 10:56:17 +04:00
foreach_drive ( $ conf , sub {
my ( $ ds , $ drive ) = @ _ ;
my $ volid = $ drive - > { file } ;
return if ! $ volid ;
my ( $ storeid , $ volname ) = PVE::Storage:: parse_volume_id ( $ volid , 1 ) ;
if ( $ storeid ) {
my $ scfg = PVE::Storage:: storage_config ( $ storecfg , $ storeid ) ;
if ( $ scfg - > { disable } ) {
$ nodehash = { } ;
} elsif ( my $ avail = $ scfg - > { nodes } ) {
foreach my $ node ( keys %$ nodehash ) {
delete $ nodehash - > { $ node } if ! $ avail - > { $ node } ;
}
} elsif ( ! $ scfg - > { shared } ) {
foreach my $ node ( keys %$ nodehash ) {
delete $ nodehash - > { $ node } if $ node ne $ nodename
}
}
}
} ) ;
return $ nodehash ;
}
2019-06-28 16:13:45 +03:00
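# for each cluster node, list the storages used by this VM that are unavailable there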
sub check_local_storage_availability {
my ( $ conf , $ storecfg ) = @ _ ;
my $ nodelist = PVE::Cluster:: get_nodelist ( ) ;
my $ nodehash = { map { $ _ = > { } } @$ nodelist } ;
foreach_drive ( $ conf , sub {
my ( $ ds , $ drive ) = @ _ ;
my $ volid = $ drive - > { file } ;
return if ! $ volid ;
my ( $ storeid , $ volname ) = PVE::Storage:: parse_volume_id ( $ volid , 1 ) ;
if ( $ storeid ) {
my $ scfg = PVE::Storage:: storage_config ( $ storecfg , $ storeid ) ;
if ( $ scfg - > { disable } ) {
foreach my $ node ( keys %$ nodehash ) {
2019-06-28 18:23:44 +03:00
$ nodehash - > { $ node } - > { unavailable_storages } - > { $ storeid } = 1 ;
2019-06-28 16:13:45 +03:00
}
} elsif ( my $ avail = $ scfg - > { nodes } ) {
foreach my $ node ( keys %$ nodehash ) {
if ( ! $ avail - > { $ node } ) {
2019-06-28 18:23:44 +03:00
$ nodehash - > { $ node } - > { unavailable_storages } - > { $ storeid } = 1 ;
2019-06-28 16:13:45 +03:00
}
}
}
}
} ) ;
2019-06-28 18:23:44 +03:00
foreach my $ node ( values %$ nodehash ) {
if ( my $ unavail = $ node - > { unavailable_storages } ) {
$ node - > { unavailable_storages } = [ sort keys %$ unavail ] ;
}
}
2019-06-28 16:13:45 +03:00
return $ nodehash ;
}
2019-11-19 14:23:46 +03:00
# Compat only, use assert_config_exists_on_node and vm_running_locally where possible
2011-08-23 09:47:04 +04:00
sub check_running {
2012-08-21 14:21:51 +04:00
my ( $ vmid , $ nocheck , $ node ) = @ _ ;
2011-08-23 09:47:04 +04:00
2019-11-19 14:23:46 +03:00
PVE::QemuConfig:: assert_config_exists_on_node ( $ vmid , $ node ) if ! $ nocheck ;
return PVE::QemuServer::Helpers:: vm_running_locally ( $ vmid ) ;
2011-08-23 09:47:04 +04:00
}
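# like config_list(), but annotate each entry with the pid of running VMs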
sub vzlist {
2011-09-12 14:26:00 +04:00
2011-08-23 09:47:04 +04:00
my $ vzlist = config_list ( ) ;
2019-11-19 14:23:44 +03:00
my $ fd = IO::Dir - > new ( $ PVE:: QemuServer:: Helpers:: var_run_tmpdir ) || return $ vzlist ;
2011-08-23 09:47:04 +04:00
2011-09-12 14:26:00 +04:00
while ( defined ( my $ de = $ fd - > read ) ) {
2011-08-23 09:47:04 +04:00
next if $ de !~ m/^(\d+)\.pid$/ ;
my $ vmid = $ 1 ;
2011-09-15 11:11:27 +04:00
next if ! defined ( $ vzlist - > { $ vmid } ) ;
if ( my $ pid = check_running ( $ vmid ) ) {
2011-08-23 09:47:04 +04:00
$ vzlist - > { $ vmid } - > { pid } = $ pid ;
}
}
return $ vzlist ;
}

our $vmstatus_return_properties = {
    vmid => get_standard_option('pve-vmid'),
    status => {
        description => "Qemu process status.",
        type => 'string',
        enum => ['stopped', 'running'],
    },
    maxmem => {
        description => "Maximum memory in bytes.",
        type => 'integer',
        optional => 1,
        renderer => 'bytes',
    },
    maxdisk => {
        description => "Root disk size in bytes.",
        type => 'integer',
        optional => 1,
        renderer => 'bytes',
    },
    name => {
        description => "VM name.",
        type => 'string',
        optional => 1,
    },
    qmpstatus => {
        description => "Qemu QMP agent status.",
        type => 'string',
        optional => 1,
    },
    pid => {
        description => "PID of running qemu process.",
        type => 'integer',
        optional => 1,
    },
    uptime => {
        description => "Uptime.",
        type => 'integer',
        optional => 1,
        renderer => 'duration',
    },
    cpus => {
        description => "Maximum usable CPUs.",
        type => 'number',
        optional => 1,
    },
    lock => {
        description => "The current config lock, if any.",
        type => 'string',
        optional => 1,
    },
    tags => {
        description => "The currently configured tags, if any.",
        type => 'string',
        optional => 1,
    },
};

my $last_proc_pid_stat;

# get VM status information
# This must be fast and should not block ($full == false)
# We only query KVM using QMP if $full == true (this can be slow)
sub vmstatus {
    my ($opt_vmid, $full) = @_;

    my $res = {};

    my $storecfg = PVE::Storage::config();

    my $list = vzlist();
    my $defaults = load_defaults();

    my ($uptime) = PVE::ProcFSTools::read_proc_uptime(1);

    my $cpucount = $cpuinfo->{cpus} || 1;

    foreach my $vmid (keys %$list) {
        next if $opt_vmid && ($vmid ne $opt_vmid);

        my $conf = PVE::QemuConfig->load_config($vmid);

        my $d = { vmid => $vmid };
        $d->{pid} = $list->{$vmid}->{pid};

        # fixme: better status?
        $d->{status} = $list->{$vmid}->{pid} ? 'running' : 'stopped';

        my $size = PVE::QemuServer::Drive::bootdisk_size($storecfg, $conf);
        if (defined($size)) {
            $d->{disk} = 0; # no info available
            $d->{maxdisk} = $size;
        } else {
            $d->{disk} = 0;
            $d->{maxdisk} = 0;
        }

        $d->{cpus} = ($conf->{sockets} || $defaults->{sockets})
            * ($conf->{cores} || $defaults->{cores});
        $d->{cpus} = $cpucount if $d->{cpus} > $cpucount;
        $d->{cpus} = $conf->{vcpus} if $conf->{vcpus};

        $d->{name} = $conf->{name} || "VM $vmid";
        $d->{maxmem} = $conf->{memory} ? $conf->{memory} * (1024*1024)
            : $defaults->{memory} * (1024*1024);

        if ($conf->{balloon}) {
            $d->{balloon_min} = $conf->{balloon} * (1024*1024);
            $d->{shares} = defined($conf->{shares}) ? $conf->{shares}
                : $defaults->{shares};
        }

        $d->{uptime} = 0;
        $d->{cpu} = 0;
        $d->{mem} = 0;
        $d->{netout} = 0;
        $d->{netin} = 0;
        $d->{diskread} = 0;
        $d->{diskwrite} = 0;

        $d->{template} = PVE::QemuConfig->is_template($conf);

        $d->{serial} = 1 if conf_has_serial($conf);
        $d->{lock} = $conf->{lock} if $conf->{lock};
        $d->{tags} = $conf->{tags} if defined($conf->{tags});

        $res->{$vmid} = $d;
    }

    my $netdev = PVE::ProcFSTools::read_proc_net_dev();
    foreach my $dev (keys %$netdev) {
        next if $dev !~ m/^tap([1-9]\d*)i/;
        my $vmid = $1;
        my $d = $res->{$vmid};
        next if !$d;

        # tap counters are from the host's point of view, so what the host
        # receives on the tap device is what the guest sent, and vice versa
        $d->{netout} += $netdev->{$dev}->{receive};
        $d->{netin} += $netdev->{$dev}->{transmit};

        if ($full) {
            $d->{nics}->{$dev}->{netout} = $netdev->{$dev}->{receive};
            $d->{nics}->{$dev}->{netin} = $netdev->{$dev}->{transmit};
        }
    }

    my $ctime = gettimeofday;

    foreach my $vmid (keys %$list) {
        my $d = $res->{$vmid};
        my $pid = $d->{pid};
        next if !$pid;

        my $pstat = PVE::ProcFSTools::read_proc_pid_stat($pid);
        next if !$pstat; # not running

        my $used = $pstat->{utime} + $pstat->{stime};

        $d->{uptime} = int(($uptime - $pstat->{starttime}) / $cpuinfo->{user_hz});

        if ($pstat->{vsize}) {
            $d->{mem} = int(($pstat->{rss} / $pstat->{vsize}) * $d->{maxmem});
        }

        my $old = $last_proc_pid_stat->{$pid};
        if (!$old) {
            $last_proc_pid_stat->{$pid} = {
                time => $ctime,
                used => $used,
                cpu => 0,
            };
            next;
        }

        # total CPU time (in ticks) available on the host since the last sample
        my $dtime = ($ctime - $old->{time}) * $cpucount * $cpuinfo->{user_hz};

        if ($dtime > 1000) {
            my $dutime = $used - $old->{used};

            # $dutime/$dtime is the fraction of total host capacity used;
            # scale by the host core count and divide by the VM's vCPU count
            # to get the per-VM CPU load
            $d->{cpu} = (($dutime/$dtime) * $cpucount) / $d->{cpus};
            $last_proc_pid_stat->{$pid} = {
                time => $ctime,
                used => $used,
                cpu => $d->{cpu},
            };
        } else {
            $d->{cpu} = $old->{cpu};
        }
    }

    return $res if !$full;

    my $qmpclient = PVE::QMPClient->new();

    my $ballooncb = sub {
        my ($vmid, $resp) = @_;

        my $info = $resp->{'return'};
        return if !$info->{max_mem};

        my $d = $res->{$vmid};

        # use memory assigned to VM
        $d->{maxmem} = $info->{max_mem};
        $d->{balloon} = $info->{actual};

        if (defined($info->{total_mem}) && defined($info->{free_mem})) {
            $d->{mem} = $info->{total_mem} - $info->{free_mem};
            $d->{freemem} = $info->{free_mem};
        }

        $d->{ballooninfo} = $info;
    };

    my $blockstatscb = sub {
        my ($vmid, $resp) = @_;

        my $data = $resp->{'return'} || [];
        my $totalrdbytes = 0;
        my $totalwrbytes = 0;

        for my $blockstat (@$data) {
            $totalrdbytes = $totalrdbytes + $blockstat->{stats}->{rd_bytes};
            $totalwrbytes = $totalwrbytes + $blockstat->{stats}->{wr_bytes};

            $blockstat->{device} =~ s/drive-//;
            $res->{$vmid}->{blockstat}->{$blockstat->{device}} = $blockstat->{stats};
        }
        $res->{$vmid}->{diskread} = $totalrdbytes;
        $res->{$vmid}->{diskwrite} = $totalwrbytes;
    };

    my $statuscb = sub {
        my ($vmid, $resp) = @_;

        $qmpclient->queue_cmd($vmid, $blockstatscb, 'query-blockstats');
        # this fails if the balloon driver is not loaded, so this must be
        # the last command (following commands are aborted if this fails)
        $qmpclient->queue_cmd($vmid, $ballooncb, 'query-balloon');

        my $status = 'unknown';
        if (!defined($status = $resp->{'return'}->{status})) {
            warn "unable to get VM status\n";
            return;
        }

        $res->{$vmid}->{qmpstatus} = $resp->{'return'}->{status};
    };

    foreach my $vmid (keys %$list) {
        next if $opt_vmid && ($vmid ne $opt_vmid);
        next if !$res->{$vmid}->{pid}; # not running
        $qmpclient->queue_cmd($vmid, $statuscb, 'query-status');
    }

    $qmpclient->queue_execute(undef, 2);

    foreach my $vmid (keys %$list) {
        next if $opt_vmid && ($vmid ne $opt_vmid);
        $res->{$vmid}->{qmpstatus} = $res->{$vmid}->{status} if !$res->{$vmid}->{qmpstatus};
    }

    return $res;
}

sub conf_has_serial {
    my ($conf) = @_;

    for (my $i = 0; $i < $MAX_SERIAL_PORTS; $i++) {
        if ($conf->{"serial$i"}) {
            return 1;
        }
    }

    return 0;
}

sub conf_has_audio {
    my ($conf, $id) = @_;

    $id //= 0;
    my $audio = $conf->{"audio$id"};
    return undef if !defined($audio);

    my $audioproperties = PVE::JSONSchema::parse_property_string($audio_fmt, $audio);
    my $audiodriver = $audioproperties->{driver} // 'spice';

    return {
        dev => $audioproperties->{device},
        dev_id => "audiodev$id",
        backend => $audiodriver,
        backend_id => "$audiodriver-backend${id}",
    };
}
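
# Example (sketch, device/driver values for illustration only): a config line
# like 'audio0: device=ich9-intel-hda,driver=spice' resolves to
#   { dev => 'ich9-intel-hda', dev_id => 'audiodev0',
#     backend => 'spice', backend_id => 'spice-backend0' }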

sub vga_conf_has_spice {
    my ($vga) = @_;

    my $vgaconf = parse_vga($vga);
    my $vgatype = $vgaconf->{type};
    return 0 if !$vgatype || $vgatype !~ m/^qxl([234])?$/;

    return $1 || 1;
}
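
# i.e. 'qxl' yields 1, 'qxl2'/'qxl3'/'qxl4' yield the display count (2/3/4),
# and any non-qxl VGA type yields 0 (no SPICE).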

sub is_native($) {
    my ($arch) = @_;
    return get_host_arch() eq $arch;
}

sub get_vm_arch {
    my ($conf) = @_;
    return $conf->{arch} // get_host_arch();
}

my $default_machines = {
    x86_64 => 'pc',
    aarch64 => 'virt',
};

sub get_vm_machine {
    my ($conf, $forcemachine, $arch, $add_pve_version, $kvmversion) = @_;

    my $machine = $forcemachine || $conf->{machine};

    if (!$machine || $machine =~ m/^(?:pc|q35|virt)$/) {
        $arch //= 'x86_64';
        $machine ||= $default_machines->{$arch};
        if ($add_pve_version) {
            $kvmversion //= kvm_user_version();
            my $pvever = PVE::QemuServer::Machine::get_pve_version($kvmversion);
            $machine .= "+pve$pvever";
        }
    }

    if ($add_pve_version && $machine !~ m/\+pve\d+$/) {
        # for version-pinned machines that do not include a pve-version (e.g.
        # pc-q35-4.1), we assume 0 to keep them stable in case we bump
        $machine .= '+pve0';
    }

    return $machine;
}
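
# Example (sketch, assuming QEMU 4.1 maps to pve1 and unmapped versions to
# pve0): an unpinned 'pc' machine on a QEMU 4.1 binary resolves to 'pc+pve1',
# while a pinned 'pc-i440fx-4.1' without an explicit +pve suffix stays stable
# as 'pc-i440fx-4.1+pve0'.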

sub get_ovmf_files($) {
    my ($arch) = @_;

    my $ovmf = $OVMF->{$arch}
        or die "no OVMF images known for architecture '$arch'\n";

    return @$ovmf;
}

my $Arch2Qemu = {
    aarch64 => '/usr/bin/qemu-system-aarch64',
    x86_64 => '/usr/bin/qemu-system-x86_64',
};
sub get_command_for_arch($) {
    my ($arch) = @_;
    return '/usr/bin/kvm' if is_native($arch);

    my $cmd = $Arch2Qemu->{$arch}
        or die "don't know how to emulate architecture '$arch'\n";
    return $cmd;
}

# To use query_supported_cpu_flags and query_understood_cpu_flags to get flags
# to use in a QEMU command line (-cpu element), first array_intersect the result
# of query_supported_ with query_understood_. This is necessary because:
#
# a) query_understood_ returns flags the host cannot use and
# b) query_supported_ (rather the QMP call) doesn't actually return CPU
#    flags, but CPU settings - with most of them being flags. Those settings
#    (and some flags, curiously) cannot be specified as a "-cpu" argument.
#
# query_supported_ needs to start up to 2 temporary VMs and is therefore rather
# expensive. If you need the value returned from this, you can get it much
# cheaper from pmxcfs using PVE::Cluster::get_node_kv('cpuflags-$accel') with
# $accel being 'kvm' or 'tcg'.
#
# pvestatd calls this function on startup and whenever the QEMU/KVM version
# changes, automatically populating pmxcfs.
#
# Returns: { kvm => [ flagX, flagY, ... ], tcg => [ flag1, flag2, ... ] }
# since kvm and tcg machines support different flags
#
sub query_supported_cpu_flags {
    my ($arch) = @_;

    $arch //= get_host_arch();
    my $default_machine = $default_machines->{$arch};

    my $flags = {};

    # FIXME: Once this is merged, the code below should work for ARM as well:
    # https://lists.nongnu.org/archive/html/qemu-devel/2019-06/msg04947.html
    die "QEMU/KVM cannot detect CPU flags on ARM (aarch64)\n" if
        $arch eq "aarch64";

    my $kvm_supported = defined(kvm_version());
    my $qemu_cmd = get_command_for_arch($arch);
    my $fakevmid = -1;
    my $pidfile = PVE::QemuServer::Helpers::pidfile_name($fakevmid);

    # Start a temporary (frozen) VM with vmid -1 to allow sending a QMP command
    my $query_supported_run_qemu = sub {
        my ($kvm) = @_;

        my $flags = {};
        my $cmd = [
            $qemu_cmd,
            '-machine', $default_machine,
            '-display', 'none',
            '-chardev', "socket,id=qmp,path=/var/run/qemu-server/$fakevmid.qmp,server,nowait",
            '-mon', 'chardev=qmp,mode=control',
            '-pidfile', $pidfile,
            '-S', '-daemonize'
        ];

        if (!$kvm) {
            push @$cmd, '-accel', 'tcg';
        }

        my $rc = run_command($cmd, noerr => 1, quiet => 0);
        die "QEMU flag querying VM exited with code " . $rc if $rc;

        eval {
            my $cmd_result = mon_cmd(
                $fakevmid,
                'query-cpu-model-expansion',
                type => 'full',
                model => { name => 'host' }
            );

            my $props = $cmd_result->{model}->{props};
            foreach my $prop (keys %$props) {
                next if $props->{$prop} ne '1';
                # QEMU returns some flags multiple times, with '_', '.' or '-'
                # (e.g. lahf_lm and lahf-lm; sse4.2, sse4-2 and sse4_2; ...).
                # We only keep those with underscores, to match /proc/cpuinfo
                $prop =~ s/\.|-/_/g;
                $flags->{$prop} = 1;
            }
        };
        my $err = $@;

        # force stop with 10 sec timeout and 'nocheck'
        # always stop, even if QMP failed
        vm_stop(undef, $fakevmid, 1, 1, 10, 0, 1);

        die $err if $err;

        return [ sort keys %$flags ];
    };

    # We need to query QEMU twice, since KVM and TCG have different supported flags
    PVE::QemuConfig->lock_config($fakevmid, sub {
        $flags->{tcg} = eval { $query_supported_run_qemu->(0) };
        warn "warning: failed querying supported tcg flags: $@\n" if $@;

        if ($kvm_supported) {
            $flags->{kvm} = eval { $query_supported_run_qemu->(1) };
            warn "warning: failed querying supported kvm flags: $@\n" if $@;
        }
    });

    return $flags;
}

# Understood CPU flags are written to a file at 'pve-qemu' compile time
my $understood_cpu_flag_dir = "/usr/share/kvm";
sub query_understood_cpu_flags {
    my $arch = get_host_arch();
    my $filepath = "$understood_cpu_flag_dir/recognized-CPUID-flags-$arch";

    die "Cannot query understood QEMU CPU flags for architecture: $arch (file not found)\n"
        if ! -e $filepath;

    my $raw = file_get_contents($filepath);
    $raw =~ s/^\s+|\s+$//g;
    my @flags = split(/\s+/, $raw);

    return \@flags;
}
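
# Minimal usage sketch following the comment above query_supported_cpu_flags
# (assumes an array_intersect helper, e.g. from PVE::Tools, is available):
#
#   my $supported = query_supported_cpu_flags()->{kvm};
#   my $understood = query_understood_cpu_flags();
#   my $usable = PVE::Tools::array_intersect($supported, $understood);
#   # @$usable can then be rendered as '-cpu host,+flag1,+flag2,...'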

sub config_to_command {
    my ($storecfg, $vmid, $conf, $defaults, $forcemachine) = @_;

    my $cmd = [];
    my $globalFlags = [];
    my $machineFlags = [];
    my $rtcFlags = [];
    my $devices = [];
    my $pciaddr = '';
    my $bridges = {};
    my $ostype = $conf->{ostype};
    my $winversion = windows_version($ostype);
    my $kvm = $conf->{kvm};
    my $nodename = nodename();

    my $arch = get_vm_arch($conf);
    my $kvm_binary = get_command_for_arch($arch);
    my $kvmver = kvm_user_version($kvm_binary);

    if (!$kvmver || $kvmver !~ m/^(\d+)\.(\d+)/ || $1 < 3) {
        $kvmver //= "undefined";
        die "Detected old QEMU binary ('$kvmver', at least 3.0 is required)\n";
    }

    my $add_pve_version = min_version($kvmver, 4, 1);

    my $machine_type = get_vm_machine($conf, $forcemachine, $arch, $add_pve_version);
    my $machine_version = PVE::QemuServer::Machine::extract_version($machine_type, $kvmver);
    $kvm //= 1 if is_native($arch);

    $machine_version =~ m/(\d+)\.(\d+)/;
    my ($machine_major, $machine_minor) = ($1, $2);

    die "Installed QEMU version '$kvmver' is too old to run machine type '$machine_type', please upgrade node '$nodename'\n"
        if !PVE::QemuServer::min_version($kvmver, $machine_major, $machine_minor);

    if (!PVE::QemuServer::Machine::can_run_pve_machine_version($machine_version, $kvmver)) {
        my $max_pve_version = PVE::QemuServer::Machine::get_pve_version($machine_version);
        die "Installed qemu-server (max feature level for $machine_major.$machine_minor is pve$max_pve_version)"
            . " is too old to run machine type '$machine_type', please upgrade node '$nodename'\n";
    }

    # if a specific +pve version is required for a feature, use $version_guard
    # instead of min_version to allow machines to be run with the minimum
    # required version
    my $required_pve_version = 0;
    my $version_guard = sub {
        my ($major, $minor, $pve) = @_;
        return 0 if !min_version($machine_version, $major, $minor, $pve);
        $required_pve_version = $pve if $pve && $pve > $required_pve_version;
        return 1;
    };
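
    # e.g. a feature gated on machine version 4.1+pve2 calls
    # $version_guard->(4, 1, 2): this returns true only if the machine is at
    # least 4.1+pve2 and, as a side effect, records pve2 as the minimum
    # pve-version this VM must be started with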

    if ($kvm) {
        die "KVM virtualisation configured, but not available. Either disable in VM configuration or enable in BIOS.\n"
            if !defined kvm_version();
    }

    my $q35 = PVE::QemuServer::Machine::machine_type_is_q35($conf);
    my $hotplug_features = parse_hotplug_features(defined($conf->{hotplug}) ? $conf->{hotplug} : '1');

    my $use_old_bios_files = undef;
    ($use_old_bios_files, $machine_type) = qemu_use_old_bios_files($machine_type);

    my $cpuunits = defined($conf->{cpuunits}) ?
        $conf->{cpuunits} : $defaults->{cpuunits};

    push @$cmd, $kvm_binary;

    push @$cmd, '-id', $vmid;

    my $vmname = $conf->{name} || "vm$vmid";
    push @$cmd, '-name', $vmname;

    my $use_virtio = 0;

    my $qmpsocket = PVE::QemuServer::Helpers::qmp_socket($vmid);
    push @$cmd, '-chardev', "socket,id=qmp,path=$qmpsocket,server,nowait";
    push @$cmd, '-mon', "chardev=qmp,mode=control";

    if (min_version($machine_version, 2, 12)) {
        push @$cmd, '-chardev', "socket,id=qmp-event,path=/var/run/qmeventd.sock,reconnect=5";
        push @$cmd, '-mon', "chardev=qmp-event,mode=control";
    }

    push @$cmd, '-pidfile', PVE::QemuServer::Helpers::pidfile_name($vmid);

    push @$cmd, '-daemonize';

    if ($conf->{smbios1}) {
        my $smbios_conf = parse_smbios1($conf->{smbios1});
        if ($smbios_conf->{base64}) {
            # Do not pass base64 flag to qemu
            delete $smbios_conf->{base64};
            my $smbios_string = "";
            foreach my $key (keys %$smbios_conf) {
                my $value;
                if ($key eq "uuid") {
                    $value = $smbios_conf->{uuid};
                } else {
                    $value = decode_base64($smbios_conf->{$key});
                }
                # qemu accepts any binary data, only commas need escaping by double comma
                $value =~ s/,/,,/g;
                $smbios_string .= "," . $key . "=" . $value if $value;
            }
            push @$cmd, '-smbios', "type=1" . $smbios_string;
        } else {
            push @$cmd, '-smbios', "type=1,$conf->{smbios1}";
        }
    }

    my ($ovmf_code, $ovmf_vars) = get_ovmf_files($arch);
    if ($conf->{bios} && $conf->{bios} eq 'ovmf') {
        die "uefi base image not found\n" if ! -f $ovmf_code;

        my $path;
        my $format;
        if (my $efidisk = $conf->{efidisk0}) {
            my $d = parse_drive('efidisk0', $efidisk);
            my ($storeid, $volname) = PVE::Storage::parse_volume_id($d->{file}, 1);
            $format = $d->{format};
            if ($storeid) {
                $path = PVE::Storage::path($storecfg, $d->{file});
                if (!defined($format)) {
                    my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
                    $format = qemu_img_format($scfg, $volname);
                }
            } else {
                $path = $d->{file};
                die "efidisk format must be specified\n"
                    if !defined($format);
            }
        } else {
            warn "no efidisk configured! Using temporary efivars disk.\n";
            $path = "/tmp/$vmid-ovmf.fd";
            PVE::Tools::file_copy($ovmf_vars, $path, -s $ovmf_vars);
            $format = 'raw';
        }

        push @$cmd, '-drive', "if=pflash,unit=0,format=raw,readonly,file=$ovmf_code";
        push @$cmd, '-drive', "if=pflash,unit=1,format=$format,id=drive-efidisk0,file=$path";
    }
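
    # Note: OVMF is split into immutable firmware code (pflash unit 0,
    # read-only) and writable EFI variables (pflash unit 1); backing the
    # latter with an efidisk volume is what makes EFI settings persistent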

    # load q35 config
    if ($q35) {
        # we use different pcie-port hardware for qemu >= 4.0 for passthrough
        if (min_version($machine_version, 4, 0)) {
            push @$devices, '-readconfig', '/usr/share/qemu-server/pve-q35-4.0.cfg';
        } else {
            push @$devices, '-readconfig', '/usr/share/qemu-server/pve-q35.cfg';
        }
    }

    if ($conf->{vmgenid}) {
        push @$devices, '-device', 'vmgenid,guid=' . $conf->{vmgenid};
    }

    # add usb controllers
    my @usbcontrollers = PVE::QemuServer::USB::get_usb_controllers($conf, $bridges, $arch, $machine_type, $usbdesc->{format}, $MAX_USB_DEVICES);
    push @$devices, @usbcontrollers if @usbcontrollers;

    my $vga = parse_vga($conf->{vga});

    my $qxlnum = vga_conf_has_spice($conf->{vga});
    $vga->{type} = 'qxl' if $qxlnum;

    if (!$vga->{type}) {
        if ($arch eq 'aarch64') {
            $vga->{type} = 'virtio';
        } elsif (min_version($machine_version, 2, 9)) {
            $vga->{type} = (!$winversion || $winversion >= 6) ? 'std' : 'cirrus';
        } else {
            $vga->{type} = ($winversion >= 6) ? 'std' : 'cirrus';
        }
    }

    # enable absolute mouse coordinates (needed by vnc)
    my $tablet;
    if (defined($conf->{tablet})) {
        $tablet = $conf->{tablet};
    } else {
        $tablet = $defaults->{tablet};
        $tablet = 0 if $qxlnum; # disable for spice because it is not needed
        $tablet = 0 if $vga->{type} =~ m/^serial\d+$/; # disable if we use serial terminal (no vga card)
    }

    if ($tablet) {
        push @$devices, '-device', print_tabletdevice_full($conf, $arch);
        my $kbd = print_keyboarddevice_full($conf, $arch);
        push @$devices, '-device', $kbd if defined($kbd);
    }
2016-01-26 12:16:08 +03:00
my $ kvm_off = 0 ;
2016-11-08 04:56:01 +03:00
my $ gpu_passthrough ;
2011-08-23 09:47:04 +04:00
# host pci devices
2011-09-11 10:59:59 +04:00
for ( my $ i = 0 ; $ i < $ MAX_HOSTPCI_DEVICES ; $ i + + ) {
2019-09-06 20:24:01 +03:00
my $ id = "hostpci$i" ;
my $ d = parse_hostpci ( $ conf - > { $ id } ) ;
2014-06-23 19:41:53 +04:00
next if ! $ d ;
2019-09-06 20:24:01 +03:00
if ( my $ pcie = $ d - > { pcie } ) {
2014-06-23 19:41:53 +04:00
die "q35 machine model is not enabled" if ! $ q35 ;
2018-12-17 15:57:08 +03:00
# win7 wants to have the pcie devices directly on the pcie bus
# instead of in the root port
if ( $ winversion == 7 ) {
2019-09-06 20:24:01 +03:00
$ pciaddr = print_pcie_addr ( "${id}bus0" ) ;
2018-12-17 15:57:08 +03:00
} else {
2019-09-05 19:13:00 +03:00
# add more root ports if needed, 4 are present by default
2019-09-06 20:24:01 +03:00
# by pve-q35 cfgs, rest added here on demand.
2019-09-05 19:13:00 +03:00
if ( $ i > 3 ) {
push @$ devices , '-device' , print_pcie_root_port ( $ i ) ;
}
2019-09-06 20:24:01 +03:00
$ pciaddr = print_pcie_addr ( $ id ) ;
2018-12-17 15:57:08 +03:00
}
2019-02-26 10:21:04 +03:00
} else {
2019-09-06 20:24:01 +03:00
$ pciaddr = print_pci_addr ( $ id , $ bridges , $ arch , $ machine_type ) ;
2014-06-23 19:41:53 +04:00
}
2016-03-30 13:20:12 +03:00
my $ xvga = '' ;
if ( $ d - > { 'x-vga' } ) {
2019-09-06 20:24:01 +03:00
$ xvga = ',x-vga=on' if ! ( $ conf - > { bios } && $ conf - > { bios } eq 'ovmf' ) ;
2016-01-26 12:16:08 +03:00
$ kvm_off = 1 ;
2019-01-21 17:56:54 +03:00
$ vga - > { type } = 'none' if ! defined ( $ conf - > { vga } ) ;
2016-11-08 04:56:01 +03:00
$ gpu_passthrough = 1 ;
2014-08-06 09:27:29 +04:00
}
2019-09-06 20:24:01 +03:00

        my $pcidevices = $d->{pciid};
        # conditional 'my $x = 1 if ...' has unspecified behaviour, so use a plain boolean
        my $multifunction = scalar(@$pcidevices) > 1;

        my $sysfspath;
        if ($d->{mdev} && scalar(@$pcidevices) == 1) {
            my $pci_id = $pcidevices->[0]->{id};
            my $uuid = PVE::SysFSTools::generate_mdev_uuid($vmid, $i);
            $sysfspath = "/sys/bus/pci/devices/$pci_id/$uuid";
        } elsif ($d->{mdev}) {
            warn "ignoring mediated device '$id' with multifunction device\n";
        }

        my $j = 0;
        foreach my $pcidevice (@$pcidevices) {
            my $devicestr = "vfio-pci";

            if ($sysfspath) {
                $devicestr .= ",sysfsdev=$sysfspath";
            } else {
                $devicestr .= ",host=$pcidevice->{id}";
            }

            my $mf_addr = $multifunction ? ".$j" : '';
            $devicestr .= ",id=${id}${mf_addr}${pciaddr}${mf_addr}";

            if ($j == 0) {
                $devicestr .= ',rombar=0' if defined($d->{rombar}) && !$d->{rombar};
                $devicestr .= "$xvga";
                $devicestr .= ",multifunction=on" if $multifunction;
                $devicestr .= ",romfile=/usr/share/kvm/$d->{romfile}" if $d->{romfile};
            }

            push @$devices, '-device', $devicestr;
            $j++;
        }
    }
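
    # For a multifunction entry like 'hostpci0: 00:02', the loop above emits
    # one vfio-pci device per function, roughly (sketch):
    #   -device vfio-pci,host=00:02.0,id=hostpci0.0,...,multifunction=on
    #   -device vfio-pci,host=00:02.1,id=hostpci0.1,...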

    # usb devices
    my $usb_dev_features = {};
    $usb_dev_features->{spice_usb3} = 1 if min_version($machine_version, 4, 0);

    my @usbdevices = PVE::QemuServer::USB::get_usb_devices($conf, $usbdesc->{format}, $MAX_USB_DEVICES, $usb_dev_features);
    push @$devices, @usbdevices if @usbdevices;

    # serial devices
    for (my $i = 0; $i < $MAX_SERIAL_PORTS; $i++) {
        if (my $path = $conf->{"serial$i"}) {
            if ($path eq 'socket') {
                my $socket = "/var/run/qemu-server/${vmid}.serial$i";
                push @$devices, '-chardev', "socket,id=serial$i,path=$socket,server,nowait";
                # On aarch64, serial0 is the UART device. Qemu only allows
                # connecting UART devices via the '-serial' command line, as
                # the device has a fixed slot on the hardware...
                if ($arch eq 'aarch64' && $i == 0) {
                    push @$devices, '-serial', "chardev:serial$i";
                } else {
                    push @$devices, '-device', "isa-serial,chardev=serial$i";
                }
            } else {
                die "no such serial device\n" if ! -c $path;
                push @$devices, '-chardev', "tty,id=serial$i,path=$path";
                push @$devices, '-device', "isa-serial,chardev=serial$i";
            }
        }
    }

    # parallel devices
    for (my $i = 0; $i < $MAX_PARALLEL_PORTS; $i++) {
        if (my $path = $conf->{"parallel$i"}) {
            die "no such parallel device\n" if ! -c $path;
            my $devtype = $path =~ m!^/dev/usb/lp! ? 'tty' : 'parport';
            push @$devices, '-chardev', "$devtype,id=parallel$i,path=$path";
            push @$devices, '-device', "isa-parallel,chardev=parallel$i";
        }
    }

    if (my $audio = conf_has_audio($conf)) {
        my $audiopciaddr = print_pci_addr("audio0", $bridges, $arch, $machine_type);

        my $id = $audio->{dev_id};
        if ($audio->{dev} eq 'AC97') {
            push @$devices, '-device', "AC97,id=${id}${audiopciaddr}";
        } elsif ($audio->{dev} =~ /intel\-hda$/) {
            push @$devices, '-device', "$audio->{dev},id=${id}${audiopciaddr}";
            push @$devices, '-device', "hda-micro,id=${id}-codec0,bus=${id}.0,cad=0";
            push @$devices, '-device', "hda-duplex,id=${id}-codec1,bus=${id}.0,cad=1";
        } else {
            die "unknown audio device '$audio->{dev}', implement me!";
        }

        push @$devices, '-audiodev', "$audio->{backend},id=$audio->{backend_id}";
    }
2011-08-23 09:47:04 +04:00
my $ sockets = 1 ;
$ sockets = $ conf - > { smp } if $ conf - > { smp } ; # old style - no longer iused
$ sockets = $ conf - > { sockets } if $ conf - > { sockets } ;
my $ cores = $ conf - > { cores } || 1 ;
2014-01-07 16:32:50 +04:00
2015-01-09 18:30:35 +03:00
my $ maxcpus = $ sockets * $ cores ;
2014-11-17 11:52:30 +03:00
2015-01-09 18:30:35 +03:00
my $ vcpus = $ conf - > { vcpus } ? $ conf - > { vcpus } : $ maxcpus ;
2014-11-17 11:52:30 +03:00
2015-01-09 18:30:35 +03:00
my $ allowed_vcpus = $ cpuinfo - > { cpus } ;
2015-09-14 09:44:27 +03:00
die "MAX $allowed_vcpus vcpus allowed per VM on this node\n"
2015-01-09 18:30:35 +03:00
if ( $ allowed_vcpus < $ maxcpus ) ;

    if ($hotplug_features->{cpu} && min_version($machine_version, 2, 7)) {
        push @$cmd, '-smp', "1,sockets=$sockets,cores=$cores,maxcpus=$maxcpus";
        for (my $i = 2; $i <= $vcpus; $i++) {
            my $cpustr = print_cpu_device($conf, $i);
            push @$cmd, '-device', $cpustr;
        }
    } else {
        push @$cmd, '-smp', "$vcpus,sockets=$sockets,cores=$cores,maxcpus=$maxcpus";
    }
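
    # e.g. sockets=2, cores=2 and vcpus=3 with CPU hotplug enabled boots as
    # '-smp 1,sockets=2,cores=2,maxcpus=4' plus two hot-pluggable '-device'
    # vCPU entries, leaving one slot free for hotplugging a fourth vCPU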

    push @$cmd, '-nodefaults';

    my $bootorder = $conf->{boot} || $confdesc->{boot}->{default};

    my $bootindex_hash = {};
    my $i = 1;
    foreach my $o (split(//, $bootorder)) {
        $bootindex_hash->{$o} = $i * 100;
        $i++;
    }

    push @$cmd, '-boot', "menu=on,strict=on,reboot-timeout=1000,splash=/usr/share/qemu-server/bootsplash.jpg";

    push @$cmd, '-no-acpi' if defined($conf->{acpi}) && $conf->{acpi} == 0;

    push @$cmd, '-no-reboot' if defined($conf->{reboot}) && $conf->{reboot} == 0;

    if ($vga->{type} && $vga->{type} !~ m/^serial\d+$/ && $vga->{type} ne 'none') {
        push @$devices, '-device', print_vga_device($conf, $vga, $arch, $machine_version, $machine_type, undef, $qxlnum, $bridges);
        my $socket = PVE::QemuServer::Helpers::vnc_socket($vmid);
        push @$cmd, '-vnc', "unix:$socket,password";
    } else {
        push @$cmd, '-vga', 'none' if $vga->{type} eq 'none';
        push @$cmd, '-nographic';
    }

    # time drift fix
    my $tdf = defined($conf->{tdf}) ? $conf->{tdf} : $defaults->{tdf};

    my $useLocaltime = $conf->{localtime};

    if ($winversion >= 5) { # windows
        $useLocaltime = 1 if !defined($conf->{localtime});

        # use time drift fix when acpi is enabled
        if (!(defined($conf->{acpi}) && $conf->{acpi} == 0)) {
            $tdf = 1 if !defined($conf->{tdf});
        }
    }

    if ($winversion >= 6) {
        push @$globalFlags, 'kvm-pit.lost_tick_policy=discard';
        push @$cmd, '-no-hpet';
    }

    push @$rtcFlags, 'driftfix=slew' if $tdf;

    if (($conf->{startdate}) && ($conf->{startdate} ne 'now')) {
        push @$rtcFlags, "base=$conf->{startdate}";
    } elsif ($useLocaltime) {
        push @$rtcFlags, 'base=localtime';
    }

    push @$cmd, get_cpu_options($conf, $arch, $kvm, $kvm_off, $machine_version, $winversion, $gpu_passthrough);

    PVE::QemuServer::Memory::config($conf, $vmid, $sockets, $cores, $defaults, $hotplug_features, $cmd);

    push @$cmd, '-S' if $conf->{freeze};

    push @$cmd, '-k', $conf->{keyboard} if defined($conf->{keyboard});

    my $guest_agent = parse_guest_agent($conf);

    if ($guest_agent->{enabled}) {
        my $qgasocket = PVE::QemuServer::Helpers::qmp_socket($vmid, 1);
        push @$devices, '-chardev', "socket,path=$qgasocket,server,nowait,id=qga0";

        if (!$guest_agent->{type} || $guest_agent->{type} eq 'virtio') {
            my $pciaddr = print_pci_addr("qga0", $bridges, $arch, $machine_type);
            push @$devices, '-device', "virtio-serial,id=qga0$pciaddr";
            push @$devices, '-device', 'virtserialport,chardev=qga0,name=org.qemu.guest_agent.0';
        } elsif ($guest_agent->{type} eq 'isa') {
            push @$devices, '-device', "isa-serial,chardev=qga0";
        }
    }
    my $rng = parse_rng($conf->{rng0}) if $conf->{rng0};
    if ($rng && &$version_guard(4, 1, 2)) {
        my $max_bytes = $rng->{max_bytes} // $rng_fmt->{max_bytes}->{default};
        my $period = $rng->{period} // $rng_fmt->{period}->{default};

        my $limiter_str = "";
        if ($max_bytes) {
            $limiter_str = ",max-bytes=$max_bytes,period=$period";
        }

        # mostly relevant for /dev/hwrng, but doesn't hurt to check others too
        die "cannot create VirtIO RNG device: source file '$rng->{source}' doesn't exist\n"
            if ! -e $rng->{source};

        my $rng_addr = print_pci_addr("rng0", $bridges, $arch, $machine_type);
        push @$devices, '-object', "rng-random,filename=$rng->{source},id=rng0";
        push @$devices, '-device', "virtio-rng-pci,rng=rng0$limiter_str$rng_addr";
    }
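    # For illustration (hypothetical values): "rng0: source=/dev/urandom,max_bytes=1024,period=1000"
    # would, assuming the machine version check passes, add something like
    #   -object rng-random,filename=/dev/urandom,id=rng0
    #   -device virtio-rng-pci,rng=rng0,max-bytes=1024,period=1000,bus=pci.0,addr=0x...
    # with the bus/addr suffix supplied by print_pci_addr().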
    my $spice_port;

    # Multi-monitor SPICE support: the qxl2/qxl3/qxl4 vga types map to 2/3/4
    # monitors. Windows guests need one qxl device per monitor, while for Linux
    # guests a single qxl device with more memory is sufficient (advice from
    # the spice mailing list).
    if ($qxlnum) {
        if ($qxlnum > 1) {
            if ($winversion) {
                for (my $i = 1; $i < $qxlnum; $i++) {
                    push @$devices, '-device', print_vga_device($conf, $vga, $arch, $machine_version, $machine_type, $i, $qxlnum, $bridges);
                }
            } else {
                # assume other OS works like Linux
                my ($ram, $vram) = ("134217728", "67108864");
                if ($vga->{memory}) {
                    $ram = PVE::Tools::convert_size($qxlnum*4*$vga->{memory}, 'mb' => 'b');
                    $vram = PVE::Tools::convert_size($qxlnum*2*$vga->{memory}, 'mb' => 'b');
                }

                push @$cmd, '-global', "qxl-vga.ram_size=$ram";
                push @$cmd, '-global', "qxl-vga.vram_size=$vram";
}
}
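        # Sizing example (derived from the code above, hypothetical config):
        # with "vga: qxl2,memory=32", $qxlnum*4*32 MiB = 256 MiB of ram_size
        # and $qxlnum*2*32 MiB = 128 MiB of vram_size are set on the qxl-vga
        # device.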
        my $pciaddr = print_pci_addr("spice", $bridges, $arch, $machine_type);

        my $pfamily = PVE::Tools::get_host_address_family($nodename);
        my @nodeaddrs = PVE::Tools::getaddrinfo_all('localhost', family => $pfamily);
        die "failed to get an ip address of type $pfamily for 'localhost'\n" if !@nodeaddrs;

        push @$devices, '-device', "virtio-serial,id=spice$pciaddr";
        push @$devices, '-chardev', "spicevmc,id=vdagent,name=vdagent";
        push @$devices, '-device', "virtserialport,chardev=vdagent,name=com.redhat.spice.0";

        my $localhost = PVE::Network::addr_to_ip($nodeaddrs[0]->{addr});
        $spice_port = PVE::Tools::next_spice_port($pfamily, $localhost);

        my $spice_enhancement = PVE::JSONSchema::parse_property_string($spice_enhancements_fmt, $conf->{spice_enhancements} // '');
        if ($spice_enhancement->{foldersharing}) {
            push @$devices, '-chardev', "spiceport,id=foldershare,name=org.spice-space.webdav.0";
            push @$devices, '-device', "virtserialport,chardev=foldershare,name=org.spice-space.webdav.0";
        }

        my $spice_opts = "tls-port=${spice_port},addr=$localhost,tls-ciphers=HIGH,seamless-migration=on";
        $spice_opts .= ",streaming-video=$spice_enhancement->{videostreaming}" if $spice_enhancement->{videostreaming};
        push @$devices, '-spice', "$spice_opts";
    }
    # enable balloon by default, unless explicitly disabled
    if (!defined($conf->{balloon}) || $conf->{balloon}) {
        $pciaddr = print_pci_addr("balloon0", $bridges, $arch, $machine_type);
        push @$devices, '-device', "virtio-balloon-pci,id=balloon0$pciaddr";
    }
    if ($conf->{watchdog}) {
        my $wdopts = parse_watchdog($conf->{watchdog});
        $pciaddr = print_pci_addr("watchdog", $bridges, $arch, $machine_type);
        my $watchdog = $wdopts->{model} || 'i6300esb';
        push @$devices, '-device', "$watchdog$pciaddr";
        push @$devices, '-watchdog-action', $wdopts->{action} if $wdopts->{action};
    }
    my $vollist = [];
    my $scsicontroller = {};
    my $ahcicontroller = {};
    my $scsihw = defined($conf->{scsihw}) ? $conf->{scsihw} : $defaults->{scsihw};

    # Add iscsi initiator name if available
    if (my $initiator = get_initiator_name()) {
        push @$devices, '-iscsi', "initiator-name=$initiator";
    }
    foreach_drive($conf, sub {
        my ($ds, $drive) = @_;

        if (PVE::Storage::parse_volume_id($drive->{file}, 1)) {
            push @$vollist, $drive->{file};
        }

        # ignore efidisk here, already added in bios/fw handling code above
        return if $drive->{interface} eq 'efidisk';

        $use_virtio = 1 if $ds =~ m/^virtio/;

        if (drive_is_cdrom($drive)) {
            if ($bootindex_hash->{d}) {
                $drive->{bootindex} = $bootindex_hash->{d};
                $bootindex_hash->{d} += 1;
            }
        } else {
            if ($bootindex_hash->{c}) {
                $drive->{bootindex} = $bootindex_hash->{c} if $conf->{bootdisk} && ($conf->{bootdisk} eq $ds);
                $bootindex_hash->{c} += 1;
            }
        }

        if ($drive->{interface} eq 'virtio') {
            push @$cmd, '-object', "iothread,id=iothread-$ds" if $drive->{iothread};
        }

        if ($drive->{interface} eq 'scsi') {
            my ($maxdev, $controller, $controller_prefix) = scsihw_infos($conf, $drive);

            die "scsi$drive->{index}: machine version 4.1~pve2 or higher is required to use more than 14 SCSI disks\n"
                if $drive->{index} > 13 && !&$version_guard(4, 1, 2);

            $pciaddr = print_pci_addr("$controller_prefix$controller", $bridges, $arch, $machine_type);
            my $scsihw_type = $scsihw =~ m/^virtio-scsi-single/ ? "virtio-scsi-pci" : $scsihw;

            my $iothread = '';
            if ($conf->{scsihw} && $conf->{scsihw} eq "virtio-scsi-single" && $drive->{iothread}) {
                $iothread .= ",iothread=iothread-$controller_prefix$controller";
                push @$cmd, '-object', "iothread,id=iothread-$controller_prefix$controller";
            } elsif ($drive->{iothread}) {
                warn "iothread is only valid with virtio disk or virtio-scsi-single controller, ignoring\n";
            }

            my $queues = '';
            if ($conf->{scsihw} && $conf->{scsihw} eq "virtio-scsi-single" && $drive->{queues}) {
                $queues = ",num_queues=$drive->{queues}";
            }

            push @$devices, '-device', "$scsihw_type,id=$controller_prefix$controller$pciaddr$iothread$queues" if !$scsicontroller->{$controller};
            $scsicontroller->{$controller} = 1;
        }

        if ($drive->{interface} eq 'sata') {
            my $controller = int($drive->{index} / $PVE::QemuServer::Drive::MAX_SATA_DISKS);
            $pciaddr = print_pci_addr("ahci$controller", $bridges, $arch, $machine_type);
            push @$devices, '-device', "ahci,id=ahci$controller,multifunction=on$pciaddr" if !$ahcicontroller->{$controller};
            $ahcicontroller->{$controller} = 1;
        }

        my $drive_cmd = print_drive_commandline_full($storecfg, $vmid, $drive);
        push @$devices, '-drive', $drive_cmd;
        push @$devices, '-device', print_drivedevice_full($storecfg, $conf, $vmid, $drive, $bridges, $arch, $machine_type);
    });
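    # Illustrative result (hypothetical config): "scsihw: virtio-scsi-single"
    # with "scsi0: local:100/vm-100-disk-0.qcow2,iothread=1" produces one
    # controller per disk, roughly:
    #   -object iothread,id=iothread-virtioscsi0
    #   -device virtio-scsi-pci,id=virtioscsi0,...,iothread=iothread-virtioscsi0
    #   -drive file=...,if=none,id=drive-scsi0
    #   -device scsi-hd,drive=drive-scsi0,id=scsi0,...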
    for (my $i = 0; $i < $MAX_NETS; $i++) {
        next if !$conf->{"net$i"};
        my $d = parse_net($conf->{"net$i"});
        next if !$d;

        $use_virtio = 1 if $d->{model} eq 'virtio';

        if ($bootindex_hash->{n}) {
            $d->{bootindex} = $bootindex_hash->{n};
            $bootindex_hash->{n} += 1;
        }

        my $netdevfull = print_netdev_full($vmid, $conf, $arch, $d, "net$i");
        push @$devices, '-netdev', $netdevfull;

        my $netdevicefull = print_netdevice_full($vmid, $conf, $d, "net$i", $bridges, $use_old_bios_files, $arch, $machine_type);
        push @$devices, '-device', $netdevicefull;
    }
    if ($conf->{ivshmem}) {
        my $ivshmem = PVE::JSONSchema::parse_property_string($ivshmem_fmt, $conf->{ivshmem});

        my $bus;
        if ($q35) {
            $bus = print_pcie_addr("ivshmem");
        } else {
            $bus = print_pci_addr("ivshmem", $bridges, $arch, $machine_type);
        }

        my $ivshmem_name = $ivshmem->{name} // $vmid;
        my $path = '/dev/shm/pve-shm-' . $ivshmem_name;

        push @$devices, '-device', "ivshmem-plain,memdev=ivshmem$bus";
        push @$devices, '-object', "memory-backend-file,id=ivshmem,share=on,mem-path=$path,size=$ivshmem->{size}M";
    }
    # pci.4 is nested in pci.1
    $bridges->{1} = 1 if $bridges->{4};

    if (!$q35) {
        # add pci bridges
        if (min_version($machine_version, 2, 3)) {
            $bridges->{1} = 1;
            $bridges->{2} = 1;
        }
        $bridges->{3} = 1 if $scsihw =~ m/^virtio-scsi-single/;
    }

    for my $k (sort { $b cmp $a } keys %$bridges) {
        next if $q35 && $k < 4; # q35.cfg already includes bridges up to 3
        $pciaddr = print_pci_addr("pci.$k", undef, $arch, $machine_type);
        my $devstr = "pci-bridge,id=pci.$k,chassis_nr=$k$pciaddr";
        if ($q35) {
            # add after -readconfig pve-q35.cfg
            splice @$devices, 2, 0, '-device', $devstr;
        } else {
            unshift @$devices, '-device', $devstr if $k > 0;
        }
    }
    # Machine type: each QEMU release is mapped to a pve-version (QEMU versions
    # without a mapping are assumed to be pve0; pinned machine versions like
    # pc-i440fx-4.1 are also assumed pve0 unless pinned with an explicit
    # +pve<N> suffix). The pve-version is dynamic: the VM is started with the
    # lowest 'feature level' it actually needs, so a VM not using any 4.1+pve2
    # feature still starts as 4.1+pve0. $version_guard (see above) selects the
    # newest necessary pve-version and doubles as a live-migration barrier:
    # migrating a VM that runs with a bumped version to an outdated node fails
    # with a helpful error message instead of silently modifying the config and
    # only failing after the migration.
    if (!$kvm) {
        push @$machineFlags, 'accel=tcg';
    }

    my $machine_type_min = $machine_type;
    if ($add_pve_version) {
        $machine_type_min =~ s/\+pve\d+$//;
        $machine_type_min .= "+pve$required_pve_version";
    }
    push @$machineFlags, "type=${machine_type_min}";
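    # e.g. (illustrative): a VM that needs a 4.1+pve2-guarded feature gets
    # "-machine ...,type=pc-i440fx-4.1+pve2", while one using no guarded
    # feature starts as "...+pve0".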
    push @$cmd, @$devices;

    push @$cmd, '-rtc', join(',', @$rtcFlags)
        if scalar(@$rtcFlags);

    push @$cmd, '-machine', join(',', @$machineFlags)
        if scalar(@$machineFlags);

    push @$cmd, '-global', join(',', @$globalFlags)
        if scalar(@$globalFlags);
    if (my $vmstate = $conf->{vmstate}) {
        my $statepath = PVE::Storage::path($storecfg, $vmstate);
        push @$vollist, $vmstate;
        push @$cmd, '-loadstate', $statepath;
        print "activating and using '$vmstate' as vmstate\n";
    }

    # add custom args
    if ($conf->{args}) {
        my $aa = PVE::Tools::split_args($conf->{args});
        push @$cmd, @$aa;
    }

    return wantarray ? ($cmd, $vollist, $spice_port) : $cmd;
}
sub spice_port {
    my ($vmid) = @_;

    my $res = mon_cmd($vmid, 'query-spice');

    return $res->{'tls-port'} || $res->{'port'} || die "no spice port\n";
}
sub vm_devices_list {
    my ($vmid) = @_;

    my $res = mon_cmd($vmid, 'query-pci');
    my $devices_to_check = [];
    my $devices = {};
    foreach my $pcibus (@$res) {
        push @$devices_to_check, @{$pcibus->{devices}};
    }

    while (@$devices_to_check) {
        my $to_check = [];
        for my $d (@$devices_to_check) {
            $devices->{$d->{'qdev_id'}} = 1 if $d->{'qdev_id'};
            next if !$d->{'pci_bridge'};

            $devices->{$d->{'qdev_id'}} += scalar(@{$d->{'pci_bridge'}->{devices}});
            push @$to_check, @{$d->{'pci_bridge'}->{devices}};
        }
        $devices_to_check = $to_check;
    }

    my $resblock = mon_cmd($vmid, 'query-block');
    foreach my $block (@$resblock) {
        if ($block->{device} =~ m/^drive-(\S+)/) {
            $devices->{$1} = 1;
        }
    }

    my $resmice = mon_cmd($vmid, 'query-mice');
    foreach my $mice (@$resmice) {
        if ($mice->{name} eq 'QEMU HID Tablet') {
            $devices->{tablet} = 1;
            last;
        }
    }

    # for usb devices there is no query-usb
    # but we can iterate over the entries in
    # qom-list path=/machine/peripheral
    my $resperipheral = mon_cmd($vmid, 'qom-list', path => '/machine/peripheral');
    foreach my $per (@$resperipheral) {
        if ($per->{name} =~ m/^usb\d+$/) {
            $devices->{$per->{name}} = 1;
        }
    }

    return $devices;
}
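# Illustrative return value (shape only):
#   { 'virtio0' => 1, 'net0' => 1, 'tablet' => 1, 'usb0' => 1 }
# i.e. a hash keyed by the qdev ids of all devices currently present in the VM.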
sub vm_deviceplug {
    my ($storecfg, $conf, $vmid, $deviceid, $device, $arch, $machine_type) = @_;

    my $q35 = PVE::QemuServer::Machine::machine_type_is_q35($conf);

    my $devices_list = vm_devices_list($vmid);
    return 1 if defined($devices_list->{$deviceid});

    # add PCI bridge if we need it for the device
    qemu_add_pci_bridge($storecfg, $conf, $vmid, $deviceid, $arch, $machine_type);

    if ($deviceid eq 'tablet') {
        qemu_deviceadd($vmid, print_tabletdevice_full($conf, $arch));
    } elsif ($deviceid eq 'keyboard') {
        qemu_deviceadd($vmid, print_keyboarddevice_full($conf, $arch));
    } elsif ($deviceid =~ m/^usb(\d+)$/) {
        die "usb hotplug currently not reliable\n";
        # since we can't reliably hot unplug all added usb devices
        # and usb passthrough disables live migration
        # we disable usb hotplugging for now
        qemu_deviceadd($vmid, PVE::QemuServer::USB::print_usbdevice_full($conf, $deviceid, $device));
    } elsif ($deviceid =~ m/^(virtio)(\d+)$/) {
        qemu_iothread_add($vmid, $deviceid, $device);

        qemu_driveadd($storecfg, $vmid, $device);
        my $devicefull = print_drivedevice_full($storecfg, $conf, $vmid, $device, $arch, $machine_type);

        qemu_deviceadd($vmid, $devicefull);
        eval { qemu_deviceaddverify($vmid, $deviceid); };
        if (my $err = $@) {
            eval { qemu_drivedel($vmid, $deviceid); };
            warn $@ if $@;
            die $err;
        }
    } elsif ($deviceid =~ m/^(virtioscsi|scsihw)(\d+)$/) {
        my $scsihw = defined($conf->{scsihw}) ? $conf->{scsihw} : "lsi";
        my $pciaddr = print_pci_addr($deviceid, undef, $arch, $machine_type);
        my $scsihw_type = $scsihw eq 'virtio-scsi-single' ? "virtio-scsi-pci" : $scsihw;

        my $devicefull = "$scsihw_type,id=$deviceid$pciaddr";

        if ($deviceid =~ m/^virtioscsi(\d+)$/ && $device->{iothread}) {
            qemu_iothread_add($vmid, $deviceid, $device);
            $devicefull .= ",iothread=iothread-$deviceid";
        }

        if ($deviceid =~ m/^virtioscsi(\d+)$/ && $device->{queues}) {
            $devicefull .= ",num_queues=$device->{queues}";
        }

        qemu_deviceadd($vmid, $devicefull);
        qemu_deviceaddverify($vmid, $deviceid);
    } elsif ($deviceid =~ m/^(scsi)(\d+)$/) {
        qemu_findorcreatescsihw($storecfg, $conf, $vmid, $device, $arch, $machine_type);
        qemu_driveadd($storecfg, $vmid, $device);

        my $devicefull = print_drivedevice_full($storecfg, $conf, $vmid, $device, $arch, $machine_type);
        eval { qemu_deviceadd($vmid, $devicefull); };
        if (my $err = $@) {
            eval { qemu_drivedel($vmid, $deviceid); };
            warn $@ if $@;
            die $err;
        }
    } elsif ($deviceid =~ m/^(net)(\d+)$/) {
        return undef if !qemu_netdevadd($vmid, $conf, $arch, $device, $deviceid);

        my $machine_type = PVE::QemuServer::Machine::qemu_machine_pxe($vmid, $conf);
        my $use_old_bios_files = undef;
        ($use_old_bios_files, $machine_type) = qemu_use_old_bios_files($machine_type);

        my $netdevicefull = print_netdevice_full($vmid, $conf, $device, $deviceid, undef, $use_old_bios_files, $arch, $machine_type);
        qemu_deviceadd($vmid, $netdevicefull);
        eval {
            qemu_deviceaddverify($vmid, $deviceid);
            qemu_set_link_status($vmid, $deviceid, !$device->{link_down});
        };
        if (my $err = $@) {
            eval { qemu_netdevdel($vmid, $deviceid); };
            warn $@ if $@;
            die $err;
        }
    } elsif (!$q35 && $deviceid =~ m/^(pci\.)(\d+)$/) {
        my $bridgeid = $2;
        my $pciaddr = print_pci_addr($deviceid, undef, $arch, $machine_type);
        my $devicefull = "pci-bridge,id=pci.$bridgeid,chassis_nr=$bridgeid$pciaddr";

        qemu_deviceadd($vmid, $devicefull);
        qemu_deviceaddverify($vmid, $deviceid);
    } else {
        die "can't hotplug device '$deviceid'\n";
    }

    return 1;
}
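# Usage sketch (hypothetical device): vm_deviceplug($storecfg, $conf, $vmid,
# 'net1', parse_net($conf->{net1}), $arch, $machine_type) hotplugs net1 and is
# a no-op when the device already exists.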
# fixme: this should raise exceptions on error!
sub vm_deviceunplug {
    my ($vmid, $conf, $deviceid) = @_;

    my $devices_list = vm_devices_list($vmid);
    return 1 if !defined($devices_list->{$deviceid});

    die "can't unplug bootdisk" if $conf->{bootdisk} && $conf->{bootdisk} eq $deviceid;

    if ($deviceid eq 'tablet' || $deviceid eq 'keyboard') {
        qemu_devicedel($vmid, $deviceid);
    } elsif ($deviceid =~ m/^usb\d+$/) {
        die "usb hotplug currently not reliable\n";
        # when unplugging usb devices this way,
        # there may be remaining usb controllers/hubs
        # so we disable it for now
        qemu_devicedel($vmid, $deviceid);
        qemu_devicedelverify($vmid, $deviceid);
    } elsif ($deviceid =~ m/^(virtio)(\d+)$/) {
        qemu_devicedel($vmid, $deviceid);
        qemu_devicedelverify($vmid, $deviceid);
        qemu_drivedel($vmid, $deviceid);
        qemu_iothread_del($conf, $vmid, $deviceid);
    } elsif ($deviceid =~ m/^(virtioscsi|scsihw)(\d+)$/) {
        qemu_devicedel($vmid, $deviceid);
        qemu_devicedelverify($vmid, $deviceid);
        qemu_iothread_del($conf, $vmid, $deviceid);
    } elsif ($deviceid =~ m/^(scsi)(\d+)$/) {
        qemu_devicedel($vmid, $deviceid);
        qemu_drivedel($vmid, $deviceid);
        qemu_deletescsihw($conf, $vmid, $deviceid);
    } elsif ($deviceid =~ m/^(net)(\d+)$/) {
        qemu_devicedel($vmid, $deviceid);
        qemu_devicedelverify($vmid, $deviceid);
        qemu_netdevdel($vmid, $deviceid);
    } else {
        die "can't unplug device '$deviceid'\n";
    }

    return 1;
}
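# Usage sketch: vm_deviceunplug($vmid, $conf, 'net0') issues device_del, waits
# via qemu_devicedelverify() until 'net0' disappears from vm_devices_list(),
# then removes the backing netdev.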
sub qemu_deviceadd {
    my ($vmid, $devicefull) = @_;

    $devicefull = "driver=" . $devicefull;
    my %options = split(/[=,]/, $devicefull);

    mon_cmd($vmid, "device_add", %options);
}
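# Note on the option parsing above: split(/[=,]/, ...) turns e.g.
# "driver=virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x3" into the hash
# (driver => 'virtio-balloon-pci', id => 'balloon0', bus => 'pci.0', addr => '0x3')
# passed to QMP device_add; option values containing '=' or ',' would not
# survive this.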
sub qemu_devicedel {
    my ($vmid, $deviceid) = @_;

    my $ret = mon_cmd($vmid, "device_del", id => $deviceid);
}
sub qemu_iothread_add {
    my ($vmid, $deviceid, $device) = @_;

    if ($device->{iothread}) {
        my $iothreads = vm_iothreads_list($vmid);
        qemu_objectadd($vmid, "iothread-$deviceid", "iothread") if !$iothreads->{"iothread-$deviceid"};
    }
}

sub qemu_iothread_del {
    my ($conf, $vmid, $deviceid) = @_;

    my $confid = $deviceid;
    if ($deviceid =~ m/^(?:virtioscsi|scsihw)(\d+)$/) {
        $confid = 'scsi' . $1;
    }
    my $device = parse_drive($confid, $conf->{$confid});

    if ($device->{iothread}) {
        my $iothreads = vm_iothreads_list($vmid);
        qemu_objectdel($vmid, "iothread-$deviceid") if $iothreads->{"iothread-$deviceid"};
    }
}
sub qemu_objectadd {
    my ($vmid, $objectid, $qomtype) = @_;

    mon_cmd($vmid, "object-add", id => $objectid, "qom-type" => $qomtype);
    return 1;
}
sub qemu_objectdel {
    my ($vmid, $objectid) = @_;

    mon_cmd($vmid, "object-del", id => $objectid);
    return 1;
}
sub qemu_driveadd {
    my ($storecfg, $vmid, $device) = @_;

    my $drive = print_drive_commandline_full($storecfg, $vmid, $device);
    $drive =~ s/\\/\\\\/g;
    my $ret = PVE::QemuServer::Monitor::hmp_cmd($vmid, "drive_add auto \"$drive\"");

    # If the command succeeds qemu prints: "OK"
    return 1 if $ret =~ m/OK/s;

    die "adding drive failed: $ret\n";
}
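# Illustrative HMP call (hypothetical drive string):
#   drive_add auto "file=/var/lib/vz/images/100/vm-100-disk-1.qcow2,if=none,id=drive-virtio1"
# drive_add is a human-monitor command, hence hmp_cmd() instead of a QMP mon_cmd().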
sub qemu_drivedel {
    my ($vmid, $deviceid) = @_;

    my $ret = PVE::QemuServer::Monitor::hmp_cmd($vmid, "drive_del drive-$deviceid");
    $ret =~ s/^\s+//;

    return 1 if $ret eq "";

    # NB: device not found errors mean the drive was auto-deleted and we ignore the error
    return 1 if $ret =~ m/Device \'.*?\' not found/s;

    die "deleting drive $deviceid failed : $ret\n";
}
sub qemu_deviceaddverify {
    my ($vmid, $deviceid) = @_;

    for (my $i = 0; $i <= 5; $i++) {
        my $devices_list = vm_devices_list($vmid);
        return 1 if defined($devices_list->{$deviceid});
        sleep 1;
    }

    die "error on hotplug device '$deviceid'\n";
}
sub qemu_devicedelverify {
    my ($vmid, $deviceid) = @_;

    # need to verify that the device is correctly removed as device_del
    # is async and empty return is not reliable
    for (my $i = 0; $i <= 5; $i++) {
        my $devices_list = vm_devices_list($vmid);
        return 1 if !defined($devices_list->{$deviceid});
        sleep 1;
    }

    die "error on hot-unplugging device '$deviceid'\n";
}
sub qemu_findorcreatescsihw {
    my ($storecfg, $conf, $vmid, $device, $arch, $machine_type) = @_;

    my ($maxdev, $controller, $controller_prefix) = scsihw_infos($conf, $device);

    my $scsihwid = "$controller_prefix$controller";
    my $devices_list = vm_devices_list($vmid);

    if (!defined($devices_list->{$scsihwid})) {
        vm_deviceplug($storecfg, $conf, $vmid, $scsihwid, $device, $arch, $machine_type);
    }

    return 1;
}
sub qemu_deletescsihw {
    my ($conf, $vmid, $opt) = @_;

    my $device = parse_drive($opt, $conf->{$opt});

    if ($conf->{scsihw} && ($conf->{scsihw} eq 'virtio-scsi-single')) {
        vm_deviceunplug($vmid, $conf, "virtioscsi$device->{index}");
        return 1;
    }

    my ($maxdev, $controller, $controller_prefix) = scsihw_infos($conf, $device);

    my $devices_list = vm_devices_list($vmid);
    foreach my $opt (keys %{$devices_list}) {
        if (is_valid_drivename($opt)) {
            my $drive = parse_drive($opt, $conf->{$opt});
            if ($drive->{interface} eq 'scsi' && $drive->{index} < (($maxdev - 1) * ($controller + 1))) {
                return 1;
            }
        }
    }

    my $scsihwid = "scsihw$controller";
    vm_deviceunplug($vmid, $conf, $scsihwid);

    return 1;
}
sub qemu_add_pci_bridge {
    my ($storecfg, $conf, $vmid, $device, $arch, $machine_type) = @_;

    my $bridges = {};

    my $bridgeid;

    print_pci_addr($device, $bridges, $arch, $machine_type);

    while (my ($k, $v) = each %$bridges) {
        $bridgeid = $k;
    }
    return 1 if !defined($bridgeid) || $bridgeid < 1;

    my $bridge = "pci.$bridgeid";
    my $devices_list = vm_devices_list($vmid);

    if (!defined($devices_list->{$bridge})) {
        vm_deviceplug($storecfg, $conf, $vmid, $bridge, $arch, $machine_type);
    }

    return 1;
}
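# Flow sketch: print_pci_addr() records the bridge the device would land on in
# $bridges, e.g. a device mapped to pci.2 sets $bridges->{2}; if "pci.2" is not
# yet present in vm_devices_list(), it is hotplugged first via vm_deviceplug().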
sub qemu_set_link_status {
    my ($vmid, $device, $up) = @_;

    mon_cmd($vmid, "set_link", name => $device,
            up => $up ? JSON::true : JSON::false);
}
sub qemu_netdevadd {
    my ($vmid, $conf, $arch, $device, $deviceid) = @_;

    my $netdev = print_netdev_full($vmid, $conf, $arch, $device, $deviceid, 1);
    my %options = split(/[=,]/, $netdev);

    mon_cmd($vmid, "netdev_add", %options);
    return 1;
}

sub qemu_netdevdel {
    my ($vmid, $deviceid) = @_;

    mon_cmd($vmid, "netdev_del", id => $deviceid);
}
sub qemu_usb_hotplug {
    my ($storecfg, $conf, $vmid, $deviceid, $device, $arch, $machine_type) = @_;

    return if !$device;

    # remove the old one first
    vm_deviceunplug($vmid, $conf, $deviceid);

    # check if xhci controller is necessary and available
    if ($device->{usb3}) {
        my $devicelist = vm_devices_list($vmid);

        if (!$devicelist->{xhci}) {
            my $pciaddr = print_pci_addr("xhci", undef, $arch, $machine_type);
            qemu_deviceadd($vmid, "nec-usb-xhci,id=xhci$pciaddr");
        }
    }
    my $d = parse_usb_device($device->{host});
    $d->{usb3} = $device->{usb3};

    # add the new one
    vm_deviceplug($storecfg, $conf, $vmid, $deviceid, $d, $arch, $machine_type);
}
sub qemu_cpu_hotplug {
    my ($vmid, $conf, $vcpus) = @_;

    my $machine_type = PVE::QemuServer::Machine::get_current_qemu_machine($vmid);

    my $sockets = 1;
    $sockets = $conf->{smp} if $conf->{smp}; # old style - no longer used
    $sockets = $conf->{sockets} if $conf->{sockets};
    my $cores = $conf->{cores} || 1;
    my $maxcpus = $sockets * $cores;

    $vcpus = $maxcpus if !$vcpus;

    die "you can't add more vcpus than maxcpus\n"
        if $vcpus > $maxcpus;

    my $currentvcpus = $conf->{vcpus} || $maxcpus;

    if ($vcpus < $currentvcpus) {

        if (PVE::QemuServer::Machine::machine_version($machine_type, 2, 7)) {

            for (my $i = $currentvcpus; $i > $vcpus; $i--) {
                qemu_devicedel($vmid, "cpu$i");
                my $retry = 0;
                my $currentrunningvcpus = undef;
                while (1) {
                    $currentrunningvcpus = mon_cmd($vmid, "query-cpus-fast");
                    last if scalar(@{$currentrunningvcpus}) == $i-1;
                    raise_param_exc({ vcpus => "error unplugging cpu$i" }) if $retry > 5;
                    $retry++;
                    sleep 1;
                }
                # update conf after each successful cpu unplug
                $conf->{vcpus} = scalar(@{$currentrunningvcpus});
                PVE::QemuConfig->write_config($vmid, $conf);
            }
        } else {
            die "cpu hot-unplugging requires qemu version 2.7 or higher\n";
        }

        return;
    }

    my $currentrunningvcpus = mon_cmd($vmid, "query-cpus-fast");
    die "vcpus in running vm does not match its configuration\n"
        if scalar(@{$currentrunningvcpus}) != $currentvcpus;

    if (PVE::QemuServer::Machine::machine_version($machine_type, 2, 7)) {

        for (my $i = $currentvcpus+1; $i <= $vcpus; $i++) {
            my $cpustr = print_cpu_device($conf, $i);
            qemu_deviceadd($vmid, $cpustr);

            my $retry = 0;
            my $currentrunningvcpus = undef;
            while (1) {
                $currentrunningvcpus = mon_cmd($vmid, "query-cpus-fast");
                last if scalar(@{$currentrunningvcpus}) == $i;
                raise_param_exc({ vcpus => "error hotplugging cpu$i" }) if $retry > 10;
                sleep 1;
                $retry++;
            }
            # update conf after each successful cpu hotplug
            $conf->{vcpus} = scalar(@{$currentrunningvcpus});
            PVE::QemuConfig->write_config($vmid, $conf);
        }
    } else {
        for (my $i = $currentvcpus; $i < $vcpus; $i++) {
            mon_cmd($vmid, "cpu-add", id => int($i));
        }
    }
}
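# Illustrative arithmetic: with "sockets: 2" and "cores: 4", $maxcpus is 8;
# requesting "vcpus: 6" on a VM currently running 4 vcpus hotplugs cpu5 and
# cpu6, polling query-cpus-fast for the expected count after each step.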
sub qemu_block_set_io_throttle {
    my ($vmid, $deviceid,
        $bps, $bps_rd, $bps_wr, $iops, $iops_rd, $iops_wr,
        $bps_max, $bps_rd_max, $bps_wr_max, $iops_max, $iops_rd_max, $iops_wr_max,
        $bps_max_length, $bps_rd_max_length, $bps_wr_max_length,
        $iops_max_length, $iops_rd_max_length, $iops_wr_max_length) = @_;

    return if !check_running($vmid);

    mon_cmd($vmid, "block_set_io_throttle", device => $deviceid,
        bps => int($bps),
        bps_rd => int($bps_rd),
        bps_wr => int($bps_wr),
        iops => int($iops),
        iops_rd => int($iops_rd),
        iops_wr => int($iops_wr),
        bps_max => int($bps_max),
        bps_rd_max => int($bps_rd_max),
        bps_wr_max => int($bps_wr_max),
        iops_max => int($iops_max),
        iops_rd_max => int($iops_rd_max),
        iops_wr_max => int($iops_wr_max),
        bps_max_length => int($bps_max_length),
        bps_rd_max_length => int($bps_rd_max_length),
        bps_wr_max_length => int($bps_wr_max_length),
        iops_max_length => int($iops_max_length),
        iops_rd_max_length => int($iops_rd_max_length),
        iops_wr_max_length => int($iops_wr_max_length),
    );
}
# old code, only used to shutdown old VM after update
sub __read_avail {
    my ($fh, $timeout) = @_;

    my $sel = new IO::Select;
    $sel->add($fh);

    my $res = '';
    my $buf;

    my @ready;
    while (scalar(@ready = $sel->can_read($timeout))) {
        my $count;
        if ($count = $fh->sysread($buf, 8192)) {
            if ($buf =~ /^(.*)\(qemu\) $/s) {
                $res .= $1;
                last;
            } else {
                $res .= $buf;
            }
        } else {
            if (!defined($count)) {
                die "$!\n";
            }
            last;
        }
    }

    die "monitor read timeout\n" if !scalar(@ready);

    return $res;
}
sub qemu_block_resize {
    my ($vmid, $deviceid, $storecfg, $volid, $size) = @_;

    my $running = check_running($vmid);

    $size = 0 if !PVE::Storage::volume_resize($storecfg, $volid, $size, $running);

    return if !$running;

    my $padding = (1024 - $size % 1024) % 1024;
    $size = $size + $padding;

    mon_cmd($vmid, "block_resize", device => $deviceid, size => int($size));
}
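# The padding rounds $size up to the next KiB boundary before the QMP
# block_resize call: e.g. a $size of 4097 bytes gets a padding of
# (1024 - 4097 % 1024) % 1024 = 1023 and is resized to 5120 bytes, while
# KiB-aligned sizes are passed through unchanged.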
sub qemu_volume_snapshot {
    my ($vmid, $deviceid, $storecfg, $volid, $snap) = @_;

    my $running = check_running($vmid);

    if ($running && do_snapshots_with_qemu($storecfg, $volid)) {
        mon_cmd($vmid, 'blockdev-snapshot-internal-sync', device => $deviceid, name => $snap);
    } else {
        PVE::Storage::volume_snapshot($storecfg, $volid, $snap);
    }
}
sub qemu_volume_snapshot_delete {
    my ($vmid, $deviceid, $storecfg, $volid, $snap) = @_;

    my $running = check_running($vmid);

    if ($running) {
        $running = undef;
        my $conf = PVE::QemuConfig->load_config($vmid);
        foreach_drive($conf, sub {
            my ($ds, $drive) = @_;
            $running = 1 if $drive->{file} eq $volid;
        });
    }

    if ($running && do_snapshots_with_qemu($storecfg, $volid)) {
        mon_cmd($vmid, 'blockdev-snapshot-delete-internal-sync', device => $deviceid, name => $snap);
    } else {
        PVE::Storage::volume_snapshot_delete($storecfg, $volid, $snap, $running);
    }
}
sub set_migration_caps {
    my ($vmid) = @_;

    my $cap_ref = [];

    my $enabled_cap = {
        "auto-converge" => 1,
        "xbzrle" => 1,
        "x-rdma-pin-all" => 0,
        "zero-blocks" => 0,
        "compress" => 0
    };

    my $supported_capabilities = mon_cmd($vmid, "query-migrate-capabilities");

    for my $supported_capability (@$supported_capabilities) {
        push @$cap_ref, {
            capability => $supported_capability->{capability},
            state => $enabled_cap->{$supported_capability->{capability}} ? JSON::true : JSON::false,
        };
    }

    mon_cmd($vmid, "migrate-set-capabilities", capabilities => $cap_ref);
}
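# Illustrative payload (shape only): migrate-set-capabilities receives
#   [ { capability => 'xbzrle', state => JSON::true },
#     { capability => 'compress', state => JSON::false }, ... ]
# i.e. every capability QEMU reports, enabled only if whitelisted above.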
my $fast_plug_option = {
    'lock' => 1,
    'name' => 1,
    'onboot' => 1,
    'shares' => 1,
    'startup' => 1,
    'description' => 1,
    'protection' => 1,
    'vmstatestorage' => 1,
    'hookscript' => 1,
    'tags' => 1,
};
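# "Fast plug" options only touch the config file and have no effect on the
# running instance, so vmconfig_hotplug_pending() below commits them directly
# without any device hotplug round-trip.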
# hotplug changes in [PENDING]
# $selection hash can be used to only apply specified options, for
# example: { cores => 1 } (only apply changed 'cores')
# $errors ref is used to return error messages
sub vmconfig_hotplug_pending {
    my ($vmid, $conf, $storecfg, $selection, $errors) = @_;

    my $defaults = load_defaults();
    my $arch = get_vm_arch($conf);
    my $machine_type = get_vm_machine($conf, undef, $arch);

    # commit values which do not have any impact on running VM first
    # Note: those options cannot raise errors, so we do not care about
    # $selection and always apply them.

    my $add_error = sub {
        my ($opt, $msg) = @_;
        $errors->{$opt} = "hotplug problem - $msg";
    };

    my $changes = 0;
    foreach my $opt (keys %{$conf->{pending}}) { # add/change
        if ($fast_plug_option->{$opt}) {
            $conf->{$opt} = $conf->{pending}->{$opt};
            delete $conf->{pending}->{$opt};
            $changes = 1;
        }
    }

    if ($changes) {
        PVE::QemuConfig->write_config($vmid, $conf);
    }
    my $hotplug_features = parse_hotplug_features(defined($conf->{hotplug}) ? $conf->{hotplug} : '1');

    my $pending_delete_hash = PVE::QemuConfig->parse_pending_delete($conf->{pending}->{delete});
    foreach my $opt (sort keys %$pending_delete_hash) {
        next if $selection && !$selection->{$opt};
        my $force = $pending_delete_hash->{$opt}->{force};
        eval {
            if ($opt eq 'hotplug') {
                die "skip\n" if ($conf->{hotplug} =~ /memory/);
            } elsif ($opt eq 'tablet') {
                die "skip\n" if !$hotplug_features->{usb};
                if ($defaults->{tablet}) {
                    vm_deviceplug($storecfg, $conf, $vmid, 'tablet', $arch, $machine_type);
                    vm_deviceplug($storecfg, $conf, $vmid, 'keyboard', $arch, $machine_type)
                        if $arch eq 'aarch64';
                } else {
                    vm_deviceunplug($vmid, $conf, 'tablet');
                    vm_deviceunplug($vmid, $conf, 'keyboard') if $arch eq 'aarch64';
                }
            } elsif ($opt =~ m/^usb\d+/) {
                die "skip\n";
                # since we cannot reliably hot unplug usb devices
                # we are disabling it
                die "skip\n" if !$hotplug_features->{usb} || $conf->{$opt} =~ m/spice/i;
                vm_deviceunplug($vmid, $conf, $opt);
            } elsif ($opt eq 'vcpus') {
                die "skip\n" if !$hotplug_features->{cpu};
                qemu_cpu_hotplug($vmid, $conf, undef);
            } elsif ($opt eq 'balloon') {
                # enabling the balloon device is not hotpluggable
                die "skip\n" if defined($conf->{balloon}) && $conf->{balloon} == 0;
                # here we reset the ballooning value to memory
                my $balloon = $conf->{memory} || $defaults->{memory};
                mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
            } elsif ($fast_plug_option->{$opt}) {
                # do nothing
            } elsif ($opt =~ m/^net(\d+)$/) {
                die "skip\n" if !$hotplug_features->{network};
                vm_deviceunplug($vmid, $conf, $opt);
            } elsif (is_valid_drivename($opt)) {
                die "skip\n" if !$hotplug_features->{disk} || $opt =~ m/(ide|sata)(\d+)/;
                vm_deviceunplug($vmid, $conf, $opt);
                vmconfig_delete_or_detach_drive($vmid, $storecfg, $conf, $opt, $force);
} elsif ( $ opt =~ m/^memory$/ ) {
die "skip\n" if ! $ hotplug_features - > { memory } ;
2016-05-23 10:47:51 +03:00
PVE::QemuServer::Memory:: qemu_memory_hotplug ( $ vmid , $ conf , $ defaults , $ opt ) ;
2015-05-28 16:59:22 +03:00
} elsif ( $ opt eq 'cpuunits' ) {
cgroups_write ( "cpu" , $ vmid , "cpu.shares" , $ defaults - > { cpuunits } ) ;
2015-06-02 09:06:45 +03:00
} elsif ( $ opt eq 'cpulimit' ) {
cgroups_write ( "cpu" , $ vmid , "cpu.cfs_quota_us" , - 1 ) ;
2014-11-18 15:29:21 +03:00
} else {
2014-11-24 12:33:51 +03:00
die "skip\n" ;
2014-11-18 15:29:21 +03:00
}
2014-11-19 14:59:02 +03:00
} ;
if ( my $ err = $@ ) {
2014-11-24 12:33:51 +03:00
& $ add_error ( $ opt , $ err ) if $ err ne "skip\n" ;
} else {
2014-11-19 14:59:02 +03:00
delete $ conf - > { $ opt } ;
2019-10-14 11:28:38 +03:00
PVE::QemuConfig - > remove_from_pending_delete ( $ conf , $ opt ) ;
2014-11-18 15:29:21 +03:00
}
}
2019-11-19 11:26:44 +03:00
my ( $ apply_pending_cloudinit , $ apply_pending_cloudinit_done ) ;
2015-08-17 16:46:07 +03:00
$ apply_pending_cloudinit = sub {
2019-11-19 11:26:44 +03:00
return if $ apply_pending_cloudinit_done ; # once is enough
$ apply_pending_cloudinit_done = 1 ; # once is enough
2015-08-17 16:46:07 +03:00
my ( $ key , $ value ) = @ _ ;
my @ cloudinit_opts = keys %$ confdesc_cloudinit ;
foreach my $ opt ( keys % { $ conf - > { pending } } ) {
next if ! grep { $ _ eq $ opt } @ cloudinit_opts ;
$ conf - > { $ opt } = delete $ conf - > { pending } - > { $ opt } ;
}
my $ new_conf = { %$ conf } ;
$ new_conf - > { $ key } = $ value ;
PVE::QemuServer::Cloudinit:: generate_cloudinitconfig ( $ new_conf , $ vmid ) ;
} ;
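# behaviour sketch (hypothetical pending change): applying e.g. a pending
# 'ciuser' first folds all pending cloud-init options into $conf, then
# regenerates the cloud-init image exactly once, so every cloud-init
# option committed in this run ends up in the freshly generated image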
2014-11-18 15:29:21 +03:00
foreach my $ opt ( keys % { $ conf - > { pending } } ) {
2014-11-19 14:59:02 +03:00
next if $ selection && ! $ selection - > { $ opt } ;
2014-11-18 15:29:21 +03:00
my $ value = $ conf - > { pending } - > { $ opt } ;
2014-11-19 14:59:02 +03:00
eval {
2015-02-09 18:47:52 +03:00
if ( $ opt eq 'hotplug' ) {
die "skip\n" if ( $ value =~ /memory/ ) || ( $ value !~ /memory/ && $ conf - > { hotplug } =~ /memory/ ) ;
} elsif ( $ opt eq 'tablet' ) {
2015-01-27 09:16:22 +03:00
die "skip\n" if ! $ hotplug_features - > { usb } ;
2014-11-19 14:59:02 +03:00
if ( $ value == 1 ) {
2018-11-12 16:10:42 +03:00
vm_deviceplug ( $ storecfg , $ conf , $ vmid , 'tablet' , $ arch , $ machine_type ) ;
vm_deviceplug ( $ storecfg , $ conf , $ vmid , 'keyboard' , $ arch , $ machine_type )
if $ arch eq 'aarch64' ;
2014-11-19 14:59:02 +03:00
} elsif ( $ value == 0 ) {
2018-11-12 16:10:42 +03:00
vm_deviceunplug ( $ vmid , $ conf , 'tablet' ) ;
vm_deviceunplug ( $ vmid , $ conf , 'keyboard' ) if $ arch eq 'aarch64' ;
2014-11-19 14:59:02 +03:00
}
2016-06-14 11:50:40 +03:00
} elsif ( $ opt =~ m/^usb\d+$/ ) {
2016-07-04 16:24:20 +03:00
die "skip\n" ;
# since we cannot reliably hot-unplug USB devices,
# we disable this here
2016-06-14 11:50:40 +03:00
die "skip\n" if ! $ hotplug_features - > { usb } || $ value =~ m/spice/i ;
my $ d = eval { PVE::JSONSchema:: parse_property_string ( $ usbdesc - > { format } , $ value ) } ;
die "skip\n" if ! $ d ;
2018-11-12 16:10:42 +03:00
qemu_usb_hotplug ( $ storecfg , $ conf , $ vmid , $ opt , $ d , $ arch , $ machine_type ) ;
2015-01-09 18:30:36 +03:00
} elsif ( $ opt eq 'vcpus' ) {
2015-01-27 09:16:22 +03:00
die "skip\n" if ! $ hotplug_features - > { cpu } ;
2014-11-19 14:59:02 +03:00
qemu_cpu_hotplug ( $ vmid , $ conf , $ value ) ;
} elsif ( $ opt eq 'balloon' ) {
2015-01-07 12:02:32 +03:00
# enabling/disabling the ballooning device is not hotpluggable
2015-01-12 17:04:31 +03:00
my $ old_balloon_enabled = ! ! ( ! defined ( $ conf - > { balloon } ) || $ conf - > { balloon } ) ;
2015-03-27 08:16:24 +03:00
my $ new_balloon_enabled = ! ! ( ! defined ( $ conf - > { pending } - > { balloon } ) || $ conf - > { pending } - > { balloon } ) ;
2015-01-07 12:02:32 +03:00
die "skip\n" if $ old_balloon_enabled != $ new_balloon_enabled ;
2014-11-19 14:59:02 +03:00
# allow manual ballooning if shares is set to zero
2015-03-02 18:03:22 +03:00
if ( ( defined ( $ conf - > { shares } ) && ( $ conf - > { shares } == 0 ) ) ) {
2015-01-02 17:16:01 +03:00
my $ balloon = $ conf - > { pending } - > { balloon } || $ conf - > { memory } || $ defaults - > { memory } ;
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "balloon" , value = > $ balloon * 1024 * 1024 ) ;
2015-01-02 17:16:01 +03:00
}
2015-03-27 08:16:24 +03:00
} elsif ( $ opt =~ m/^net(\d+)$/ ) {
2014-11-25 08:58:33 +03:00
# some changes can be done without hotplug
2015-03-27 08:16:24 +03:00
vmconfig_update_net ( $ storecfg , $ conf , $ hotplug_features - > { network } ,
2018-11-12 16:10:42 +03:00
$ vmid , $ opt , $ value , $ arch , $ machine_type ) ;
2016-03-03 17:45:15 +03:00
} elsif ( is_valid_drivename ( $ opt ) ) {
2019-11-26 19:07:49 +03:00
die "skip\n" if $ opt eq 'efidisk0' ;
2014-11-25 13:37:37 +03:00
# some changes can be done without hotplug
2015-08-17 16:46:07 +03:00
my $ drive = parse_drive ( $ opt , $ value ) ;
if ( drive_is_cloudinit ( $ drive ) ) {
& $ apply_pending_cloudinit ( $ opt , $ value ) ;
}
2015-01-27 09:16:22 +03:00
vmconfig_update_disk ( $ storecfg , $ conf , $ hotplug_features - > { disk } ,
2020-02-06 12:53:51 +03:00
$ vmid , $ opt , $ value , $ arch , $ machine_type ) ;
2015-01-28 08:47:24 +03:00
} elsif ( $ opt =~ m/^memory$/ ) { #dimms
die "skip\n" if ! $ hotplug_features - > { memory } ;
2016-05-23 10:47:51 +03:00
$ value = PVE::QemuServer::Memory:: qemu_memory_hotplug ( $ vmid , $ conf , $ defaults , $ opt , $ value ) ;
2015-05-28 16:59:22 +03:00
} elsif ( $ opt eq 'cpuunits' ) {
cgroups_write ( "cpu" , $ vmid , "cpu.shares" , $ conf - > { pending } - > { $ opt } ) ;
2015-06-02 09:06:45 +03:00
} elsif ( $ opt eq 'cpulimit' ) {
2015-06-02 17:03:25 +03:00
my $ cpulimit = $ conf - > { pending } - > { $ opt } == 0 ? - 1 : int ( $ conf - > { pending } - > { $ opt } * 100000 ) ;
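# e.g. a cpulimit of 1.5 yields cfs_quota_us = 150000, i.e. 1.5 cores
# worth of runtime per (default) 100000 us CFS period; 0 maps to -1,
# meaning no limit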
2015-06-02 09:06:45 +03:00
cgroups_write ( "cpu" , $ vmid , "cpu.cfs_quota_us" , $ cpulimit ) ;
2014-11-19 14:59:02 +03:00
} else {
2014-11-24 12:33:51 +03:00
die "skip\n" ; # skip non-hot-pluggable options
2014-11-18 15:29:21 +03:00
}
2014-11-19 14:59:02 +03:00
} ;
if ( my $ err = $@ ) {
2014-11-24 12:33:51 +03:00
& $ add_error ( $ opt , $ err ) if $ err ne "skip\n" ;
} else {
2014-11-19 14:59:02 +03:00
$ conf - > { $ opt } = $ value ;
delete $ conf - > { pending } - > { $ opt } ;
2014-11-18 15:29:21 +03:00
}
}
2019-12-13 14:41:51 +03:00
PVE::QemuConfig - > write_config ( $ vmid , $ conf ) ;
2014-11-17 11:50:31 +03:00
}
2014-11-17 09:08:44 +03:00
2015-08-12 14:38:36 +03:00
sub try_deallocate_drive {
my ( $ storecfg , $ vmid , $ conf , $ key , $ drive , $ rpcenv , $ authuser , $ force ) = @ _ ;
if ( ( $ force || $ key =~ /^unused/ ) && ! drive_is_cdrom ( $ drive , 1 ) ) {
my $ volid = $ drive - > { file } ;
if ( vm_is_volid_owner ( $ storecfg , $ vmid , $ volid ) ) {
my $ sid = PVE::Storage:: parse_volume_id ( $ volid ) ;
$ rpcenv - > check ( $ authuser , "/storage/$sid" , [ 'Datastore.AllocateSpace' ] ) ;
2015-08-13 12:15:56 +03:00
# check if the disk is really unused
die "unable to delete '$volid' - volume is still in use (snapshot?)\n"
2020-03-02 13:33:44 +03:00
if PVE::QemuServer::Drive:: is_volume_in_use ( $ storecfg , $ conf , $ key , $ volid ) ;
2015-08-13 12:15:56 +03:00
PVE::Storage:: vdisk_free ( $ storecfg , $ volid ) ;
2015-08-12 14:38:36 +03:00
return 1 ;
2015-08-20 11:34:59 +03:00
} else {
# if the VM does not own this disk, just remove it from the config
return 1 ;
2015-08-12 14:38:36 +03:00
}
}
return undef ;
}
sub vmconfig_delete_or_detach_drive {
my ( $ vmid , $ storecfg , $ conf , $ opt , $ force ) = @ _ ;
my $ drive = parse_drive ( $ opt , $ conf - > { $ opt } ) ;
my $ rpcenv = PVE::RPCEnvironment:: get ( ) ;
my $ authuser = $ rpcenv - > get_user ( ) ;
if ( $ force ) {
$ rpcenv - > check_vm_perm ( $ authuser , $ vmid , undef , [ 'VM.Config.Disk' ] ) ;
try_deallocate_drive ( $ storecfg , $ vmid , $ conf , $ opt , $ drive , $ rpcenv , $ authuser , $ force ) ;
} else {
vmconfig_register_unused_drive ( $ storecfg , $ vmid , $ conf , $ drive ) ;
}
}
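# usage sketch (hypothetical call): removing 'scsi1' with $force set,
#   vmconfig_delete_or_detach_drive($vmid, $storecfg, $conf, 'scsi1', 1);
# tries to free the underlying volume, while without $force the volume
# is only re-registered as an unusedX entry in the config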
2019-10-14 11:28:38 +03:00
2014-11-17 09:08:44 +03:00
sub vmconfig_apply_pending {
2020-01-07 18:55:18 +03:00
my ( $ vmid , $ conf , $ storecfg , $ errors ) = @ _ ;
my $ add_apply_error = sub {
my ( $ opt , $ msg ) = @ _ ;
my $ err_msg = "unable to apply pending change $opt : $msg" ;
$ errors - > { $ opt } = $ err_msg ;
warn $ err_msg ;
} ;
2014-11-17 11:50:31 +03:00
# cold plug
2014-11-17 09:08:44 +03:00
2019-10-14 11:28:38 +03:00
my $ pending_delete_hash = PVE::QemuConfig - > parse_pending_delete ( $ conf - > { pending } - > { delete } ) ;
2019-10-22 13:47:16 +03:00
foreach my $ opt ( sort keys %$ pending_delete_hash ) {
2019-10-22 13:34:27 +03:00
my $ force = $ pending_delete_hash - > { $ opt } - > { force } ;
2020-01-07 18:55:18 +03:00
eval {
2020-01-15 16:48:59 +03:00
if ( $ opt =~ m/^unused/ ) {
die "internal error" ;
} elsif ( defined ( $ conf - > { $ opt } ) && is_valid_drivename ( $ opt ) ) {
2020-01-07 18:55:18 +03:00
vmconfig_delete_or_detach_drive ( $ vmid , $ storecfg , $ conf , $ opt , $ force ) ;
}
} ;
if ( my $ err = $@ ) {
$ add_apply_error - > ( $ opt , $ err ) ;
2014-11-17 09:08:44 +03:00
} else {
2019-10-14 11:28:38 +03:00
PVE::QemuConfig - > remove_from_pending_delete ( $ conf , $ opt ) ;
2014-11-17 09:08:44 +03:00
delete $ conf - > { $ opt } ;
}
}
2020-01-15 16:48:59 +03:00
PVE::QemuConfig - > cleanup_pending ( $ conf ) ;
2014-11-17 09:08:44 +03:00
foreach my $ opt ( keys % { $ conf - > { pending } } ) { # add/change
2020-01-15 16:48:59 +03:00
next if $ opt eq 'delete' ; # just to be sure
2020-01-07 18:55:18 +03:00
eval {
2020-01-15 16:48:59 +03:00
if ( defined ( $ conf - > { $ opt } ) && is_valid_drivename ( $ opt ) ) {
2020-01-07 18:55:18 +03:00
vmconfig_register_unused_drive ( $ storecfg , $ vmid , $ conf , parse_drive ( $ opt , $ conf - > { $ opt } ) )
}
} ;
if ( my $ err = $@ ) {
$ add_apply_error - > ( $ opt , $ err ) ;
2014-11-17 09:08:44 +03:00
} else {
2020-01-07 18:55:18 +03:00
$ conf - > { $ opt } = delete $ conf - > { pending } - > { $ opt } ;
2014-11-17 09:08:44 +03:00
}
}
2020-01-15 16:48:59 +03:00
# write all changes at once to avoid unnecessary i/o
PVE::QemuConfig - > write_config ( $ vmid , $ conf ) ;
2014-11-17 09:08:44 +03:00
}
2014-11-25 08:58:33 +03:00
my $ safe_num_ne = sub {
my ( $ a , $ b ) = @ _ ;
return 0 if ! defined ( $ a ) && ! defined ( $ b ) ;
return 1 if ! defined ( $ a ) ;
return 1 if ! defined ( $ b ) ;
return $ a != $ b ;
} ;
my $ safe_string_ne = sub {
my ( $ a , $ b ) = @ _ ;
return 0 if ! defined ( $ a ) && ! defined ( $ b ) ;
return 1 if ! defined ( $ a ) ;
return 1 if ! defined ( $ b ) ;
return $ a ne $ b ;
} ;
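# behaviour sketch for the two helpers above: two undefs compare equal,
# a one-sided undef compares unequal, otherwise plain !=/ne applies:
#   $safe_num_ne->(undef, undef);  # 0 - equal
#   $safe_num_ne->(3, undef);      # 1 - differ
#   $safe_string_ne->('a', 'a');   # false - equal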
sub vmconfig_update_net {
2018-11-12 16:10:42 +03:00
my ( $ storecfg , $ conf , $ hotplug , $ vmid , $ opt , $ value , $ arch , $ machine_type ) = @ _ ;
2014-11-25 08:58:33 +03:00
my $ newnet = parse_net ( $ value ) ;
if ( $ conf - > { $ opt } ) {
my $ oldnet = parse_net ( $ conf - > { $ opt } ) ;
if ( & $ safe_string_ne ( $ oldnet - > { model } , $ newnet - > { model } ) ||
& $ safe_string_ne ( $ oldnet - > { macaddr } , $ newnet - > { macaddr } ) ||
& $ safe_num_ne ( $ oldnet - > { queues } , $ newnet - > { queues } ) ||
! ( $ newnet - > { bridge } && $ oldnet - > { bridge } ) ) { # bridge/nat mode change
# for a non-online change, we try to hot-unplug
2015-01-21 10:52:37 +03:00
die "skip\n" if ! $ hotplug ;
2014-11-25 08:58:33 +03:00
vm_deviceunplug ( $ vmid , $ conf , $ opt ) ;
} else {
die "internal error" if $ opt !~ m/net(\d+)/ ;
my $ iface = "tap${vmid}i$1" ;
2015-03-27 08:16:24 +03:00
2015-01-20 13:47:11 +03:00
if ( & $ safe_string_ne ( $ oldnet - > { bridge } , $ newnet - > { bridge } ) ||
& $ safe_num_ne ( $ oldnet - > { tag } , $ newnet - > { tag } ) ||
2016-01-18 11:09:05 +03:00
& $ safe_string_ne ( $ oldnet - > { trunks } , $ newnet - > { trunks } ) ||
2015-01-20 13:47:11 +03:00
& $ safe_num_ne ( $ oldnet - > { firewall } , $ newnet - > { firewall } ) ) {
2014-11-25 08:58:33 +03:00
PVE::Network:: tap_unplug ( $ iface ) ;
2016-03-08 15:55:13 +03:00
PVE::Network:: tap_plug ( $ iface , $ newnet - > { bridge } , $ newnet - > { tag } , $ newnet - > { firewall } , $ newnet - > { trunks } , $ newnet - > { rate } ) ;
} elsif ( & $ safe_num_ne ( $ oldnet - > { rate } , $ newnet - > { rate } ) ) {
# Rate can be applied on its own but any change above needs to
# include the rate in tap_plug since OVS resets everything.
PVE::Network:: tap_rate_limit ( $ iface , $ newnet - > { rate } ) ;
2014-11-25 08:58:33 +03:00
}
2014-11-25 14:07:02 +03:00
2015-01-20 13:47:11 +03:00
if ( & $ safe_string_ne ( $ oldnet - > { link_down } , $ newnet - > { link_down } ) ) {
qemu_set_link_status ( $ vmid , $ opt , ! $ newnet - > { link_down } ) ;
}
2014-11-25 14:07:02 +03:00
return 1 ;
2014-11-25 08:58:33 +03:00
}
}
2015-03-27 08:16:24 +03:00
2015-01-21 10:52:37 +03:00
if ( $ hotplug ) {
2018-11-12 16:10:42 +03:00
vm_deviceplug ( $ storecfg , $ conf , $ vmid , $ opt , $ newnet , $ arch , $ machine_type ) ;
2014-11-25 14:07:02 +03:00
} else {
die "skip\n" ;
}
2014-11-25 08:58:33 +03:00
}
2014-11-25 13:37:37 +03:00
sub vmconfig_update_disk {
2020-02-06 12:53:51 +03:00
my ( $ storecfg , $ conf , $ hotplug , $ vmid , $ opt , $ value , $ arch , $ machine_type ) = @ _ ;
2014-11-25 13:37:37 +03:00
my $ drive = parse_drive ( $ opt , $ value ) ;
if ( $ conf - > { $ opt } ) {
if ( my $ old_drive = parse_drive ( $ opt , $ conf - > { $ opt } ) ) {
my $ media = $ drive - > { media } || 'disk' ;
my $ oldmedia = $ old_drive - > { media } || 'disk' ;
die "unable to change media type\n" if $ media ne $ oldmedia ;
if ( ! drive_is_cdrom ( $ old_drive ) ) {
2015-03-27 08:16:24 +03:00
if ( $ drive - > { file } ne $ old_drive - > { file } ) {
2014-11-25 13:37:37 +03:00
2015-01-21 10:52:37 +03:00
die "skip\n" if ! $ hotplug ;
2014-11-25 13:37:37 +03:00
# unplug and register as unused
vm_deviceunplug ( $ vmid , $ conf , $ opt ) ;
vmconfig_register_unused_drive ( $ storecfg , $ vmid , $ conf , $ old_drive )
2015-03-27 08:16:24 +03:00
2014-11-25 13:37:37 +03:00
} else {
# update existing disk
# skip non-hotpluggable values
2016-11-03 10:17:14 +03:00
if ( & $ safe_string_ne ( $ drive - > { discard } , $ old_drive - > { discard } ) ||
2015-03-19 13:06:12 +03:00
& $ safe_string_ne ( $ drive - > { iothread } , $ old_drive - > { iothread } ) ||
2015-04-02 07:10:54 +03:00
& $ safe_string_ne ( $ drive - > { queues } , $ old_drive - > { queues } ) ||
2020-01-16 18:06:34 +03:00
& $ safe_string_ne ( $ drive - > { cache } , $ old_drive - > { cache } ) ||
& $ safe_string_ne ( $ drive - > { ssd } , $ old_drive - > { ssd } ) ) {
2014-11-25 13:37:37 +03:00
die "skip\n" ;
}
# apply throttle
if ( & $ safe_num_ne ( $ drive - > { mbps } , $ old_drive - > { mbps } ) ||
& $ safe_num_ne ( $ drive - > { mbps_rd } , $ old_drive - > { mbps_rd } ) ||
& $ safe_num_ne ( $ drive - > { mbps_wr } , $ old_drive - > { mbps_wr } ) ||
& $ safe_num_ne ( $ drive - > { iops } , $ old_drive - > { iops } ) ||
& $ safe_num_ne ( $ drive - > { iops_rd } , $ old_drive - > { iops_rd } ) ||
& $ safe_num_ne ( $ drive - > { iops_wr } , $ old_drive - > { iops_wr } ) ||
& $ safe_num_ne ( $ drive - > { mbps_max } , $ old_drive - > { mbps_max } ) ||
& $ safe_num_ne ( $ drive - > { mbps_rd_max } , $ old_drive - > { mbps_rd_max } ) ||
& $ safe_num_ne ( $ drive - > { mbps_wr_max } , $ old_drive - > { mbps_wr_max } ) ||
& $ safe_num_ne ( $ drive - > { iops_max } , $ old_drive - > { iops_max } ) ||
& $ safe_num_ne ( $ drive - > { iops_rd_max } , $ old_drive - > { iops_rd_max } ) ||
2016-11-03 10:17:28 +03:00
& $ safe_num_ne ( $ drive - > { iops_wr_max } , $ old_drive - > { iops_wr_max } ) ||
& $ safe_num_ne ( $ drive - > { bps_max_length } , $ old_drive - > { bps_max_length } ) ||
& $ safe_num_ne ( $ drive - > { bps_rd_max_length } , $ old_drive - > { bps_rd_max_length } ) ||
& $ safe_num_ne ( $ drive - > { bps_wr_max_length } , $ old_drive - > { bps_wr_max_length } ) ||
& $ safe_num_ne ( $ drive - > { iops_max_length } , $ old_drive - > { iops_max_length } ) ||
& $ safe_num_ne ( $ drive - > { iops_rd_max_length } , $ old_drive - > { iops_rd_max_length } ) ||
& $ safe_num_ne ( $ drive - > { iops_wr_max_length } , $ old_drive - > { iops_wr_max_length } ) ) {
2015-03-27 08:16:24 +03:00
2014-11-25 13:37:37 +03:00
qemu_block_set_io_throttle ( $ vmid , "drive-$opt" ,
( $ drive - > { mbps } || 0 ) * 1024 * 1024 ,
( $ drive - > { mbps_rd } || 0 ) * 1024 * 1024 ,
( $ drive - > { mbps_wr } || 0 ) * 1024 * 1024 ,
$ drive - > { iops } || 0 ,
$ drive - > { iops_rd } || 0 ,
$ drive - > { iops_wr } || 0 ,
( $ drive - > { mbps_max } || 0 ) * 1024 * 1024 ,
( $ drive - > { mbps_rd_max } || 0 ) * 1024 * 1024 ,
( $ drive - > { mbps_wr_max } || 0 ) * 1024 * 1024 ,
$ drive - > { iops_max } || 0 ,
$ drive - > { iops_rd_max } || 0 ,
2016-11-03 10:17:28 +03:00
$ drive - > { iops_wr_max } || 0 ,
$ drive - > { bps_max_length } || 1 ,
$ drive - > { bps_rd_max_length } || 1 ,
$ drive - > { bps_wr_max_length } || 1 ,
$ drive - > { iops_max_length } || 1 ,
$ drive - > { iops_rd_max_length } || 1 ,
$ drive - > { iops_wr_max_length } || 1 ) ;
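# note: the mbps_* values are configured in MB/s and converted to bytes/s
# above; the *_length values are burst durations in seconds and default
# to 1 when unset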
2014-11-25 13:37:37 +03:00
}
2015-03-27 08:16:24 +03:00
2014-11-25 13:37:37 +03:00
return 1 ;
}
2015-02-12 10:00:14 +03:00
} else { # cdrom
2015-03-27 08:16:24 +03:00
2015-02-12 10:00:14 +03:00
if ( $ drive - > { file } eq 'none' ) {
2020-02-06 12:53:53 +03:00
mon_cmd ( $ vmid , "eject" , force = > JSON:: true , id = > "$opt" ) ;
2016-04-04 11:04:10 +03:00
if ( drive_is_cloudinit ( $ old_drive ) ) {
vmconfig_register_unused_drive ( $ storecfg , $ vmid , $ conf , $ old_drive ) ;
}
2015-02-12 10:00:14 +03:00
} else {
my $ path = get_iso_path ( $ storecfg , $ vmid , $ drive - > { file } ) ;
2020-02-06 12:53:52 +03:00
# force eject if locked
2020-02-06 12:53:53 +03:00
mon_cmd ( $ vmid , "eject" , force = > JSON:: true , id = > "$opt" ) ;
2020-02-06 12:53:52 +03:00
if ( $ path ) {
mon_cmd ( $ vmid , "blockdev-change-medium" ,
2020-02-06 12:53:53 +03:00
id = > "$opt" , filename = > "$path" ) ;
2020-02-06 12:53:52 +03:00
}
2015-02-12 10:00:14 +03:00
}
2015-03-27 08:16:24 +03:00
2015-02-14 11:20:41 +03:00
return 1 ;
2014-11-25 13:37:37 +03:00
}
}
}
2015-03-27 08:16:24 +03:00
die "skip\n" if ! $ hotplug || $ opt =~ m/(ide|sata)(\d+)/ ;
2015-02-12 10:00:14 +03:00
# hotplug new disks
2016-02-04 18:47:56 +03:00
PVE::Storage:: activate_volumes ( $ storecfg , [ $ drive - > { file } ] ) if $ drive - > { file } !~ m | ^ /dev/ . + | ;
2018-11-12 16:10:42 +03:00
vm_deviceplug ( $ storecfg , $ conf , $ vmid , $ opt , $ drive , $ arch , $ machine_type ) ;
2014-11-25 13:37:37 +03:00
}
2011-08-23 09:47:04 +04:00
sub vm_start {
2015-10-23 11:41:53 +03:00
my ( $ storecfg , $ vmid , $ statefile , $ skiplock , $ migratedfrom , $ paused ,
2020-01-14 16:30:37 +03:00
$ forcemachine , $ spice_ticket , $ migration_network , $ migration_type , $ targetstorage , $ timeout ) = @ _ ;
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > lock_config ( $ vmid , sub {
my $ conf = PVE::QemuConfig - > load_config ( $ vmid , $ migratedfrom ) ;
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
die "you can't start a vm if it's a template\n" if PVE::QemuConfig - > is_template ( $ conf ) ;
2013-02-14 14:58:53 +04:00
2019-03-14 19:04:48 +03:00
my $ is_suspended = PVE::QemuConfig - > has_lock ( $ conf , 'suspended' ) ;
PVE::QemuConfig - > check_lock ( $ conf )
if ! ( $ skiplock || $ is_suspended ) ;
2011-08-23 09:47:04 +04:00
2012-08-21 14:21:51 +04:00
die "VM $vmid already running\n" if check_running ( $ vmid , undef , $ migratedfrom ) ;
2011-08-23 09:47:04 +04:00
2019-09-11 15:07:43 +03:00
# clean up leftover reboot request files
eval { clear_reboot_request ( $ vmid ) ; } ;
warn $@ if $@ ;
2014-11-17 09:08:44 +03:00
if ( ! $ statefile && scalar ( keys % { $ conf - > { pending } } ) ) {
2014-11-19 14:59:02 +03:00
vmconfig_apply_pending ( $ vmid , $ conf , $ storecfg ) ;
2016-03-07 14:41:12 +03:00
$ conf = PVE::QemuConfig - > load_config ( $ vmid ) ; # update/reload
2014-11-17 09:08:44 +03:00
}
2015-06-16 15:26:43 +03:00
PVE::QemuServer::Cloudinit:: generate_cloudinitconfig ( $ conf , $ vmid ) ;
2012-09-18 11:23:47 +04:00
my $ defaults = load_defaults ( ) ;
# set environment variable useful inside network script
$ ENV { PVE_MIGRATED_FROM } = $ migratedfrom if $ migratedfrom ;
2017-01-03 17:03:15 +03:00
my $ local_volumes = { } ;
2017-01-05 11:16:28 +03:00
if ( $ targetstorage ) {
2017-01-03 17:03:15 +03:00
foreach_drive ( $ conf , sub {
my ( $ ds , $ drive ) = @ _ ;
return if drive_is_cdrom ( $ drive ) ;
my $ volid = $ drive - > { file } ;
return if ! $ volid ;
my ( $ storeid , $ volname ) = PVE::Storage:: parse_volume_id ( $ volid ) ;
my $ scfg = PVE::Storage:: storage_config ( $ storecfg , $ storeid ) ;
return if $ scfg - > { shared } ;
$ local_volumes - > { $ ds } = [ $ volid , $ storeid , $ volname ] ;
} ) ;
my $ format = undef ;
foreach my $ opt ( sort keys %$ local_volumes ) {
my ( $ volid , $ storeid , $ volname ) = @ { $ local_volumes - > { $ opt } } ;
my $ drive = parse_drive ( $ opt , $ conf - > { $ opt } ) ;
2020-01-20 16:00:43 +03:00
# If a remote storage is specified and the format of the original
# volume is not available there, fall back to the default format.
# Otherwise use the same format as the original.
2017-01-03 17:03:15 +03:00
if ( $ targetstorage && $ targetstorage ne "1" ) {
$ storeid = $ targetstorage ;
my ( $ defFormat , $ validFormats ) = PVE::Storage:: storage_default_format ( $ storecfg , $ storeid ) ;
2020-01-20 16:00:43 +03:00
my $ scfg = PVE::Storage:: storage_config ( $ storecfg , $ storeid ) ;
my $ fileFormat = qemu_img_format ( $ scfg , $ volname ) ;
$ format = ( grep { $ fileFormat eq $ _ } @ { $ validFormats } ) ? $ fileFormat : $ defFormat ;
2017-01-03 17:03:15 +03:00
} else {
my $ scfg = PVE::Storage:: storage_config ( $ storecfg , $ storeid ) ;
2020-01-20 16:00:44 +03:00
$ format = qemu_img_format ( $ scfg , $ volname ) ;
2017-01-03 17:03:15 +03:00
}
my $ newvolid = PVE::Storage:: vdisk_alloc ( $ storecfg , $ storeid , $ vmid , $ format , undef , ( $ drive - > { size } / 1024 ) ) ;
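# $drive->{size} is in bytes while vdisk_alloc() expects KiB, hence the
# division by 1024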
my $ newdrive = $ drive ;
$ newdrive - > { format } = $ format ;
$ newdrive - > { file } = $ newvolid ;
2019-12-05 18:11:01 +03:00
my $ drivestr = print_drive ( $ newdrive ) ;
2017-01-03 17:03:15 +03:00
$ local_volumes - > { $ opt } = $ drivestr ;
# pass drive to conf for command line
$ conf - > { $ opt } = $ drivestr ;
}
}
2019-01-31 16:33:39 +03:00
PVE::GuestHelpers:: exec_hookscript ( $ conf , $ vmid , 'pre-start' , 1 ) ;
2019-03-14 19:04:48 +03:00
if ( $ is_suspended ) {
# enforce machine type on suspended vm to ensure HW compatibility
$ forcemachine = $ conf - > { runningmachine } ;
2019-03-19 11:17:30 +03:00
print "Resuming suspended VM\n" ;
2019-03-14 19:04:48 +03:00
}
2015-10-29 09:37:00 +03:00
my ( $ cmd , $ vollist , $ spice_port ) = config_to_command ( $ storecfg , $ vmid , $ conf , $ defaults , $ forcemachine ) ;
2012-09-18 11:23:47 +04:00
2019-11-11 13:28:30 +03:00
my $ migration_ip ;
my $ get_migration_ip = sub {
my ( $ cidr , $ nodename ) = @ _ ;
return $ migration_ip if defined ( $ migration_ip ) ;
if ( ! defined ( $ cidr ) ) {
my $ dc_conf = PVE::Cluster:: cfs_read_file ( 'datacenter.cfg' ) ;
$ cidr = $ dc_conf - > { migration } - > { network } ;
}
if ( defined ( $ cidr ) ) {
my $ ips = PVE::Network:: get_local_ip_from_cidr ( $ cidr ) ;
die "could not get IP: no address configured on local " .
"node for network '$cidr'\n" if scalar ( @$ ips ) == 0 ;
die "could not get IP: multiple addresses configured on local " .
"node for network '$cidr'\n" if scalar ( @$ ips ) > 1 ;
$ migration_ip = @$ ips [ 0 ] ;
}
$ migration_ip = PVE::Cluster:: remote_node_ip ( $ nodename , 1 )
if ! defined ( $ migration_ip ) ;
return $ migration_ip ;
} ;
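# illustrative example (hypothetical network): with
#   migration: type=insecure,network=192.0.2.0/24
# in datacenter.cfg, $get_migration_ip->(undef, $nodename) returns the
# single local address inside 192.0.2.0/24 and dies if the node has no
# address, or several addresses, in that network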
2013-07-26 13:22:58 +04:00
my $ migrate_uri ;
2011-08-23 09:47:04 +04:00
if ( $ statefile ) {
if ( $ statefile eq 'tcp' ) {
2013-07-26 13:22:58 +04:00
my $ localip = "localhost" ;
my $ datacenterconf = PVE::Cluster:: cfs_read_file ( 'datacenter.cfg' ) ;
2019-12-10 13:05:39 +03:00
my $ nodename = nodename ( ) ;
2016-10-31 11:42:31 +03:00
2016-12-02 19:55:29 +03:00
if ( ! defined ( $ migration_type ) ) {
if ( defined ( $ datacenterconf - > { migration } - > { type } ) ) {
$ migration_type = $ datacenterconf - > { migration } - > { type } ;
} else {
$ migration_type = 'secure' ;
}
}
2016-10-31 11:42:31 +03:00
if ( $ migration_type eq 'insecure' ) {
2019-11-11 13:28:30 +03:00
$ localip = $ get_migration_ip - > ( $ migration_network , $ nodename ) ;
2016-10-31 11:42:31 +03:00
$ localip = "[$localip]" if Net::IP:: ip_is_ipv6 ( $ localip ) ;
2013-07-26 13:22:58 +04:00
}
2016-10-31 11:42:31 +03:00
2015-05-12 13:14:03 +03:00
my $ pfamily = PVE::Tools:: get_host_address_family ( $ nodename ) ;
2019-10-14 14:49:06 +03:00
my $ migrate_port = PVE::Tools:: next_migrate_port ( $ pfamily ) ;
2015-11-05 16:09:16 +03:00
$ migrate_uri = "tcp:${localip}:${migrate_port}" ;
2012-09-18 11:23:47 +04:00
push @$ cmd , '-incoming' , $ migrate_uri ;
push @$ cmd , '-S' ;
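# '-S' starts QEMU with vCPUs paused; the target only begins executing
# once the incoming migration has finished and a 'cont' is issued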
2016-06-03 12:32:00 +03:00
} elsif ( $ statefile eq 'unix' ) {
# should be the default for secure migrations as an SSH TCP forward
# tunnel is not deterministically ready in time and regularly fails
# to be set up, so use UNIX socket forwards instead
2016-06-03 16:59:15 +03:00
my $ socket_addr = "/run/qemu-server/$vmid.migrate" ;
unlink $ socket_addr ;
$ migrate_uri = "unix:$socket_addr" ;
2016-06-03 12:32:00 +03:00
push @$ cmd , '-incoming' , $ migrate_uri ;
push @$ cmd , '-S' ;
2019-10-17 20:13:01 +03:00
} elsif ( - e $ statefile ) {
2012-09-18 11:23:47 +04:00
push @$ cmd , '-loadstate' , $ statefile ;
2019-10-17 20:13:01 +03:00
} else {
my $ statepath = PVE::Storage:: path ( $ storecfg , $ statefile ) ;
2019-10-22 12:52:28 +03:00
push @$ vollist , $ statefile ;
2019-10-17 20:13:01 +03:00
push @$ cmd , '-loadstate' , $ statepath ;
2011-08-23 09:47:04 +04:00
}
2012-12-12 18:35:26 +04:00
} elsif ( $ paused ) {
push @$ cmd , '-S' ;
2011-08-23 09:47:04 +04:00
}
# host pci devices
2011-09-11 10:59:59 +04:00
for ( my $ i = 0 ; $ i < $ MAX_HOSTPCI_DEVICES ; $ i + + ) {
my $ d = parse_hostpci ( $ conf - > { "hostpci$i" } ) ;
next if ! $ d ;
2014-06-23 19:41:55 +04:00
my $ pcidevices = $ d - > { pciid } ;
foreach my $ pcidevice ( @$ pcidevices ) {
2018-11-20 19:13:40 +03:00
my $ pciid = $ pcidevice - > { id } ;
2014-02-14 00:12:29 +04:00
2019-11-12 16:23:03 +03:00
my $ info = PVE::SysFSTools:: pci_device_info ( "$pciid" ) ;
2018-11-16 18:17:51 +03:00
die "IOMMU not present\n" if ! PVE::SysFSTools:: check_iommu_support ( ) ;
2014-06-23 19:41:55 +04:00
die "no pci device info for device '$pciid'\n" if ! $ info ;
2018-11-20 19:13:39 +03:00
if ( $ d - > { mdev } ) {
my $ uuid = PVE::SysFSTools:: generate_mdev_uuid ( $ vmid , $ i ) ;
PVE::SysFSTools:: pci_create_mdev_device ( $ pciid , $ uuid , $ d - > { mdev } ) ;
} else {
die "can't unbind/bind pci group to vfio '$pciid'\n"
if ! PVE::SysFSTools:: pci_dev_group_bind_to_vfio ( $ pciid ) ;
die "can't reset pci device '$pciid'\n"
if $ info - > { has_fl_reset } and ! PVE::SysFSTools:: pci_dev_reset ( $ info ) ;
}
2014-06-23 19:41:55 +04:00
}
2011-09-11 10:59:59 +04:00
}
2011-08-23 09:47:04 +04:00
PVE::Storage:: activate_volumes ( $ storecfg , $ vollist ) ;
2019-06-19 10:21:22 +03:00
eval {
run_command ( [ '/bin/systemctl' , 'stop' , "$vmid.scope" ] ,
outfunc = > sub { } , errfunc = > sub { } ) ;
} ;
# Issues with the above 'stop' not completing fully are extremely rare, so
# a very low timeout should be more than enough here...
PVE::Systemd:: wait_for_unit_removed ( "$vmid.scope" , 5 ) ;
2016-04-21 16:18:28 +03:00
2016-06-03 12:10:01 +03:00
my $ cpuunits = defined ( $ conf - > { cpuunits } ) ? $ conf - > { cpuunits }
: $ defaults - > { cpuunits } ;
2020-01-14 16:30:37 +03:00
my $ start_timeout = $ timeout // config_aware_timeout ( $ conf , $ is_suspended ) ;
2019-12-09 18:14:08 +03:00
my % run_params = (
timeout = > $ statefile ? undef : $ start_timeout ,
umask = > 0077 ,
noerr = > 1 ,
) ;
2016-06-15 06:04:02 +03:00
2019-12-09 18:14:09 +03:00
# when migrating, prefix QEMU output so the other side can pick up any
# errors that might occur and show them to the user
if ( $ migratedfrom ) {
$ run_params { quiet } = 1 ;
2019-12-12 15:38:55 +03:00
$ run_params { logfunc } = sub { print "QEMU: $_[0]\n" } ;
2019-12-09 18:14:09 +03:00
}
2016-06-15 06:04:02 +03:00
my % properties = (
Slice = > 'qemu.slice' ,
KillMode = > 'none' ,
CPUShares = > $ cpuunits
) ;
if ( my $ cpulimit = $ conf - > { cpulimit } ) {
$ properties { CPUQuota } = int ( $ cpulimit * 100 ) ;
}
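# e.g. cpulimit=2 becomes CPUQuota=200%, i.e. at most two cores worth
# of CPU time for the whole scope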
$ properties { timeout } = 10 if $ statefile ; # setting up the scope should be quick
2018-05-07 13:09:10 +03:00
my $ run_qemu = sub {
PVE::Tools:: run_fork sub {
2018-06-15 12:00:53 +03:00
PVE::Systemd:: enter_systemd_scope ( $ vmid , "Proxmox VE VM $vmid" , % properties ) ;
2019-12-09 18:14:08 +03:00
my $ exitcode = run_command ( $ cmd , % run_params ) ;
die "QEMU exited with code $exitcode\n" if $ exitcode ;
2018-05-07 13:09:10 +03:00
} ;
} ;
2016-06-15 06:04:02 +03:00
if ( $ conf - > { hugepages } ) {
my $ code = sub {
my $ hugepages_topology = PVE::QemuServer::Memory:: hugepages_topology ( $ conf ) ;
my $ hugepages_host_topology = PVE::QemuServer::Memory:: hugepages_host_topology ( ) ;
PVE::QemuServer::Memory:: hugepages_mount ( ) ;
PVE::QemuServer::Memory:: hugepages_allocate ( $ hugepages_topology , $ hugepages_host_topology ) ;
2018-05-07 13:09:10 +03:00
eval { $ run_qemu - > ( ) } ;
2016-06-15 06:04:02 +03:00
if ( my $ err = $@ ) {
PVE::QemuServer::Memory:: hugepages_reset ( $ hugepages_host_topology ) ;
die $ err ;
}
PVE::QemuServer::Memory:: hugepages_pre_deallocate ( $ hugepages_topology ) ;
} ;
eval { PVE::QemuServer::Memory:: hugepages_update_locked ( $ code ) ; } ;
} else {
2018-05-07 13:09:10 +03:00
eval { $ run_qemu - > ( ) } ;
2016-06-15 06:04:02 +03:00
}
2016-02-19 13:13:01 +03:00
if ( my $ err = $@ ) {
# deactivate volumes if start fails
eval { PVE::Storage:: deactivate_volumes ( $ storecfg , $ vollist ) ; } ;
die "start failed: $err" ;
}
2011-08-23 09:47:04 +04:00
2013-07-26 13:22:58 +04:00
print "migration listens on $migrate_uri\n" if $ migrate_uri ;
2012-01-27 12:35:26 +04:00
2017-04-19 16:19:47 +03:00
if ( $ statefile && $ statefile ne 'tcp' && $ statefile ne 'unix' ) {
2019-11-19 14:23:47 +03:00
eval { mon_cmd ( $ vmid , "cont" ) ; } ;
2012-09-19 14:37:33 +04:00
warn $@ if $@ ;
2012-09-19 12:40:30 +04:00
}
2017-01-03 17:03:15 +03:00
#start nbd server for storage migration
if ( $ targetstorage ) {
2019-12-10 13:05:39 +03:00
my $ nodename = nodename ( ) ;
2019-11-11 13:28:30 +03:00
my $ localip = $ get_migration_ip - > ( $ migration_network , $ nodename ) ;
2017-01-03 17:03:15 +03:00
my $ pfamily = PVE::Tools:: get_host_address_family ( $ nodename ) ;
2019-10-14 14:49:06 +03:00
my $ storage_migrate_port = PVE::Tools:: next_migrate_port ( $ pfamily ) ;
2017-01-03 17:03:15 +03:00
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "nbd-server-start" , addr = > { type = > 'inet' , data = > { host = > "${localip}" , port = > "${storage_migrate_port}" } } ) ;
2017-01-03 17:03:15 +03:00
$ localip = "[$localip]" if Net::IP:: ip_is_ipv6 ( $ localip ) ;
foreach my $ opt ( sort keys %$ local_volumes ) {
2020-01-29 16:30:08 +03:00
my $ drivestr = $ local_volumes - > { $ opt } ;
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "nbd-server-add" , device = > "drive-$opt" , writable = > JSON:: true ) ;
2019-10-14 14:49:06 +03:00
my $ migrate_storage_uri = "nbd:${localip}:${storage_migrate_port}:exportname=drive-$opt" ;
2020-01-29 16:30:08 +03:00
print "storage migration listens on $migrate_storage_uri volume:$drivestr\n" ;
2017-01-03 17:03:15 +03:00
}
}
2013-07-24 13:24:20 +04:00
if ( $ migratedfrom ) {
2014-01-09 13:58:48 +04:00
eval {
2014-11-19 12:43:42 +03:00
set_migration_caps ( $ vmid ) ;
2014-01-09 13:58:48 +04:00
} ;
2013-07-24 13:24:20 +04:00
warn $@ if $@ ;
2014-01-09 13:58:48 +04:00
2013-07-24 13:24:20 +04:00
if ( $ spice_port ) {
print "spice listens on port $spice_port\n" ;
if ( $ spice_ticket ) {
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "set_password" , protocol = > 'spice' , password = > $ spice_ticket ) ;
mon_cmd ( $ vmid , "expire_password" , protocol = > 'spice' , time = > "+30" ) ;
2013-07-24 11:52:33 +04:00
}
}
2013-07-24 13:24:20 +04:00
} else {
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "balloon" , value = > $ conf - > { balloon } * 1024 * 1024 )
2018-05-14 15:03:05 +03:00
if ! $ statefile && $ conf - > { balloon } ;
2015-01-20 13:47:11 +03:00
foreach my $ opt ( keys %$ conf ) {
next if $ opt !~ m/^net\d+$/ ;
my $ nicconf = parse_net ( $ conf - > { $ opt } ) ;
qemu_set_link_status ( $ vmid , $ opt , 0 ) if $ nicconf - > { link_down } ;
}
2012-08-28 14:46:08 +04:00
}
2015-03-27 08:16:24 +03:00
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , 'qom-set' ,
2015-03-09 10:22:12 +03:00
path = > "machine/peripheral/balloon0" ,
property = > "guest-stats-polling-interval" ,
value = > 2 ) if ( ! defined ( $ conf - > { balloon } ) || $ conf - > { balloon } ) ;
2019-11-29 13:06:47 +03:00
if ( $ is_suspended ) {
2019-03-19 11:17:30 +03:00
print "Resumed VM, removing state\n" ;
2019-11-29 13:06:47 +03:00
if ( my $ vmstate = $ conf - > { vmstate } ) {
PVE::Storage:: deactivate_volumes ( $ storecfg , [ $ vmstate ] ) ;
PVE::Storage:: vdisk_free ( $ storecfg , $ vmstate ) ;
}
2019-03-14 19:04:48 +03:00
delete $ conf - > @ { qw( lock vmstate runningmachine ) } ;
PVE::QemuConfig - > write_config ( $ vmid , $ conf ) ;
}
2019-01-31 16:33:39 +03:00
PVE::GuestHelpers:: exec_hookscript ( $ conf , $ vmid , 'post-start' ) ;
2011-08-23 09:47:04 +04:00
} ) ;
}
sub vm_commandline {
2019-01-30 16:43:38 +03:00
my ( $ storecfg , $ vmid , $ snapname ) = @ _ ;
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
my $ conf = PVE::QemuConfig - > load_config ( $ vmid ) ;
2019-11-20 15:24:56 +03:00
my $ forcemachine ;
2011-08-23 09:47:04 +04:00
2019-01-30 16:43:38 +03:00
if ( $ snapname ) {
my $ snapshot = $ conf - > { snapshots } - > { $ snapname } ;
2019-01-30 17:08:15 +03:00
die "snapshot '$snapname' does not exist\n" if ! defined ( $ snapshot ) ;
2019-11-20 15:24:56 +03:00
# check for a 'runningmachine' in snapshot
$ forcemachine = $ snapshot - > { runningmachine } if $ snapshot - > { runningmachine } ;
2019-01-30 17:08:15 +03:00
$ snapshot - > { digest } = $ conf - > { digest } ; # keep file digest for API
2019-01-30 16:43:38 +03:00
$ conf = $ snapshot ;
}
2011-08-23 09:47:04 +04:00
my $ defaults = load_defaults ( ) ;
2019-11-20 15:24:56 +03:00
my $ cmd = config_to_command ( $ storecfg , $ vmid , $ conf , $ defaults , $ forcemachine ) ;
2011-08-23 09:47:04 +04:00
2016-09-21 16:14:18 +03:00
return PVE::Tools:: cmd2string ( $ cmd ) ;
2011-08-23 09:47:04 +04:00
}
sub vm_reset {
my ( $ vmid , $ skiplock ) = @ _ ;
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > lock_config ( $ vmid , sub {
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
my $ conf = PVE::QemuConfig - > load_config ( $ vmid ) ;
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > check_lock ( $ conf ) if ! $ skiplock ;
2011-08-23 09:47:04 +04:00
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "system_reset" ) ;
2011-11-25 11:05:36 +04:00
} ) ;
}
sub get_vm_volumes {
my ( $ conf ) = @ _ ;
2011-08-23 09:47:04 +04:00
2011-11-25 11:05:36 +04:00
my $ vollist = [] ;
2012-09-25 09:42:01 +04:00
foreach_volid ( $ conf , sub {
2017-06-13 07:47:05 +03:00
my ( $ volid , $ attr ) = @ _ ;
2011-11-25 11:05:36 +04:00
2012-09-25 09:42:01 +04:00
return if $ volid =~ m | ^ / | ;
2011-11-25 11:05:36 +04:00
2012-09-25 09:42:01 +04:00
my ( $ sid , $ volname ) = PVE::Storage:: parse_volume_id ( $ volid , 1 ) ;
return if ! $ sid ;
2011-11-25 11:05:36 +04:00
push @$ vollist , $ volid ;
2011-08-23 09:47:04 +04:00
} ) ;
2011-11-25 11:05:36 +04:00
return $ vollist ;
}
sub vm_stop_cleanup {
2015-01-21 12:42:43 +03:00
my ( $ storecfg , $ vmid , $ conf , $ keepActive , $ apply_pending_changes ) = @ _ ;
2011-11-25 11:05:36 +04:00
2011-12-16 09:34:35 +04:00
eval {
2011-11-25 11:05:36 +04:00
2012-01-17 14:56:56 +04:00
if ( ! $ keepActive ) {
my $ vollist = get_vm_volumes ( $ conf ) ;
PVE::Storage:: deactivate_volumes ( $ storecfg , $ vollist ) ;
}
2015-03-27 08:16:24 +03:00
2012-09-03 11:51:08 +04:00
foreach my $ ext ( qw( mon qmp pid vnc qga ) ) {
2012-03-01 15:54:06 +04:00
unlink "/var/run/qemu-server/${vmid}.$ext" ;
}
2015-03-27 08:16:24 +03:00
2019-02-22 13:38:33 +03:00
if ( $ conf - > { ivshmem } ) {
my $ ivshmem = PVE::JSONSchema:: parse_property_string ( $ ivshmem_fmt , $ conf - > { ivshmem } ) ;
2019-02-26 10:20:37 +03:00
# just delete it for now; VMs which already have it open are not
# affected, but new VMs will get a separate one. If this
# becomes an issue we can either add some sort of ref-counting or
# add a "don't delete on stop" flag to the ivshmem format.
2019-02-22 13:38:33 +03:00
unlink '/dev/shm/pve-shm-' . ( $ ivshmem - > { name } // $ vmid ) ;
}
2018-11-20 19:13:39 +03:00
foreach my $ key ( keys %$ conf ) {
next if $ key !~ m/^hostpci(\d+)$/ ;
my $ hostpciindex = $ 1 ;
my $ d = parse_hostpci ( $ conf - > { $ key } ) ;
my $ uuid = PVE::SysFSTools:: generate_mdev_uuid ( $ vmid , $ hostpciindex ) ;
foreach my $ pci ( @ { $ d - > { pciid } } ) {
2018-11-20 19:13:40 +03:00
my $ pciid = $ pci - > { id } ;
2018-11-20 19:13:39 +03:00
PVE::SysFSTools:: pci_cleanup_mdev_device ( $ pciid , $ uuid ) ;
}
}
2015-01-21 12:42:43 +03:00
vmconfig_apply_pending ( $ vmid , $ conf , $ storecfg ) if $ apply_pending_changes ;
2011-12-16 09:34:35 +04:00
} ;
warn $@ if $@ ; # avoid errors - just warn
2011-08-23 09:47:04 +04:00
}
2019-09-11 15:07:44 +03:00
# call only in locked context
sub _do_vm_stop {
my ( $ storecfg , $ vmid , $ skiplock , $ nocheck , $ timeout , $ shutdown , $ force , $ keepActive ) = @ _ ;
2011-12-15 15:47:39 +04:00
2019-09-11 15:07:44 +03:00
my $ pid = check_running ( $ vmid , $ nocheck ) ;
return if ! $ pid ;
2011-08-23 09:47:04 +04:00
2019-09-11 15:07:44 +03:00
my $ conf ;
if ( ! $ nocheck ) {
$ conf = PVE::QemuConfig - > load_config ( $ vmid ) ;
PVE::QemuConfig - > check_lock ( $ conf ) if ! $ skiplock ;
if ( ! defined ( $ timeout ) && $ shutdown && $ conf - > { startup } ) {
my $ opts = PVE::JSONSchema:: pve_parse_startup_order ( $ conf - > { startup } ) ;
$ timeout = $ opts - > { down } if $ opts - > { down } ;
2011-09-15 10:21:32 +04:00
}
2019-09-11 15:07:44 +03:00
PVE::GuestHelpers:: exec_hookscript ( $ conf , $ vmid , 'pre-stop' ) ;
}
2011-09-12 14:26:00 +04:00
2019-09-11 15:07:44 +03:00
eval {
if ( $ shutdown ) {
if ( defined ( $ conf ) && parse_guest_agent ( $ conf ) - > { enabled } ) {
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "guest-shutdown" , timeout = > $ timeout ) ;
2011-12-15 15:47:39 +04:00
} else {
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "system_powerdown" ) ;
2011-08-23 09:47:04 +04:00
}
} else {
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "quit" ) ;
2011-08-23 09:47:04 +04:00
}
2019-09-11 15:07:44 +03:00
} ;
my $ err = $@ ;
2011-08-23 09:47:04 +04:00
2019-09-11 15:07:44 +03:00
if ( ! $ err ) {
$ timeout = 60 if ! defined ( $ timeout ) ;
2011-08-23 09:47:04 +04:00
my $ count = 0 ;
2011-09-15 10:21:32 +04:00
while ( ( $ count < $ timeout ) && check_running ( $ vmid , $ nocheck ) ) {
2011-08-23 09:47:04 +04:00
$ count + + ;
sleep 1 ;
}
if ( $ count >= $ timeout ) {
2019-09-11 15:07:44 +03:00
if ( $ force ) {
warn "VM still running - terminating now with SIGTERM\n" ;
kill 15 , $ pid ;
} else {
die "VM quit/powerdown failed - got timeout\n" ;
}
} else {
vm_stop_cleanup ( $ storecfg , $ vmid , $ conf , $ keepActive , 1 ) if $ conf ;
return ;
2011-08-23 09:47:04 +04:00
}
2019-09-11 15:07:44 +03:00
} else {
if ( $ force ) {
warn "VM quit/powerdown failed - terminating now with SIGTERM\n" ;
kill 15 , $ pid ;
} else {
die "VM quit/powerdown failed\n" ;
}
}
# wait again
$ timeout = 10 ;
my $ count = 0 ;
while ( ( $ count < $ timeout ) && check_running ( $ vmid , $ nocheck ) ) {
$ count + + ;
sleep 1 ;
}
if ( $ count >= $ timeout ) {
warn "VM still running - terminating now with SIGKILL\n" ;
kill 9 , $ pid ;
sleep 1 ;
}
2011-08-23 09:47:04 +04:00
2019-09-11 15:07:44 +03:00
vm_stop_cleanup ( $ storecfg , $ vmid , $ conf , $ keepActive , 1 ) if $ conf ;
}
# Note: use $nocheck to skip tests that require the VM configuration file.
# We need that when migrating VMs to other nodes (the files were already moved)
# Note: we set $keepActive in vzdump stop mode - volumes need to stay active
sub vm_stop {
my ( $ storecfg , $ vmid , $ skiplock , $ nocheck , $ timeout , $ shutdown , $ force , $ keepActive , $ migratedfrom ) = @ _ ;
$ force = 1 if ! defined ( $ force ) && ! $ shutdown ;
if ( $ migratedfrom ) {
my $ pid = check_running ( $ vmid , $ nocheck , $ migratedfrom ) ;
kill 15 , $ pid if $ pid ;
my $ conf = PVE::QemuConfig - > load_config ( $ vmid , $ migratedfrom ) ;
vm_stop_cleanup ( $ storecfg , $ vmid , $ conf , $ keepActive , 0 ) ;
return ;
}
PVE::QemuConfig - > lock_config ( $ vmid , sub {
_do_vm_stop ( $ storecfg , $ vmid , $ skiplock , $ nocheck , $ timeout , $ shutdown , $ force , $ keepActive ) ;
2011-11-25 11:05:36 +04:00
} ) ;
2011-08-23 09:47:04 +04:00
}
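# A minimal usage sketch for vm_stop (illustrative only, values hypothetical):
#
#   my $storecfg = PVE::Storage::config();
#   # request a clean shutdown, wait up to 60s, then escalate via $force
#   vm_stop($storecfg, $vmid, 0, 0, 60, 1, 1, 0);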
2019-09-11 15:07:45 +03:00
sub vm_reboot {
my ( $ vmid , $ timeout ) = @ _ ;
PVE::QemuConfig - > lock_config ( $ vmid , sub {
2019-11-11 19:29:23 +03:00
eval {
2019-09-11 15:07:45 +03:00
2019-11-11 19:29:23 +03:00
# only reboot if running, as qmeventd starts it again on a stop event
return if ! check_running ( $ vmid ) ;
2019-09-11 15:07:45 +03:00
2019-11-11 19:29:23 +03:00
create_reboot_request ( $ vmid ) ;
2019-09-11 15:07:45 +03:00
2019-11-11 19:29:23 +03:00
my $ storecfg = PVE::Storage:: config ( ) ;
_do_vm_stop ( $ storecfg , $ vmid , undef , undef , $ timeout , 1 ) ;
2019-09-11 15:07:45 +03:00
2019-11-11 19:29:23 +03:00
} ;
if ( my $ err = $@ ) {
2019-11-11 20:05:20 +03:00
# avoid the next normal shutdown being mistaken for a reboot
2019-11-11 19:29:23 +03:00
clear_reboot_request ( $ vmid ) ;
die $ err ;
}
2019-09-11 15:07:45 +03:00
} ) ;
}
2019-12-09 17:26:59 +03:00
# note: if using the statestorage parameter, the caller has to check privileges
2011-08-23 09:47:04 +04:00
sub vm_suspend {
2019-03-14 19:04:50 +03:00
my ( $ vmid , $ skiplock , $ includestate , $ statestorage ) = @ _ ;
2019-03-14 19:04:47 +03:00
my $ conf ;
my $ path ;
my $ storecfg ;
my $ vmstate ;
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > lock_config ( $ vmid , sub {
2011-08-23 09:47:04 +04:00
2019-03-14 19:04:47 +03:00
$ conf = PVE::QemuConfig - > load_config ( $ vmid ) ;
2011-08-23 09:47:04 +04:00
2019-03-14 19:04:47 +03:00
my $ is_backing_up = PVE::QemuConfig - > has_lock ( $ conf , 'backup' ) ;
2016-03-11 14:11:57 +03:00
PVE::QemuConfig - > check_lock ( $ conf )
2019-03-14 19:04:47 +03:00
if ! ( $ skiplock || $ is_backing_up ) ;
die "cannot suspend to disk during backup\n"
if $ is_backing_up && $ includestate ;
2012-06-26 08:42:18 +04:00
2019-03-14 19:04:47 +03:00
if ( $ includestate ) {
$ conf - > { lock } = 'suspending' ;
my $ date = strftime ( "%Y-%m-%d" , localtime ( time ( ) ) ) ;
$ storecfg = PVE::Storage:: config ( ) ;
2019-12-09 17:26:59 +03:00
if ( ! $ statestorage ) {
$ statestorage = find_vmstate_storage ( $ conf , $ storecfg ) ;
# check permissions for the storage
my $ rpcenv = PVE::RPCEnvironment:: get ( ) ;
if ( $ rpcenv - > { type } ne 'cli' ) {
my $ authuser = $ rpcenv - > get_user ( ) ;
$ rpcenv - > check ( $ authuser , "/storage/$statestorage" , [ 'Datastore.AllocateSpace' ] ) ;
}
}
2019-03-14 19:04:50 +03:00
$ vmstate = PVE::QemuConfig - > __snapshot_save_vmstate ( $ vmid , $ conf , "suspend-$date" , $ storecfg , $ statestorage , 1 ) ;
2019-03-14 19:04:47 +03:00
$ path = PVE::Storage:: path ( $ storecfg , $ vmstate ) ;
PVE::QemuConfig - > write_config ( $ vmid , $ conf ) ;
} else {
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "stop" ) ;
2019-03-14 19:04:47 +03:00
}
2011-08-23 09:47:04 +04:00
} ) ;
2019-03-14 19:04:47 +03:00
if ( $ includestate ) {
# save vm state
PVE::Storage:: activate_volumes ( $ storecfg , [ $ vmstate ] ) ;
eval {
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "savevm-start" , statefile = > $ path ) ;
2019-03-14 19:04:47 +03:00
for ( ; ; ) {
2019-11-19 14:23:47 +03:00
my $ state = mon_cmd ( $ vmid , "query-savevm" ) ;
2019-03-14 19:04:47 +03:00
if ( ! $ state - > { status } ) {
die "savevm not active\n" ;
} elsif ( $ state - > { status } eq 'active' ) {
sleep ( 1 ) ;
next ;
} elsif ( $ state - > { status } eq 'completed' ) {
2019-03-19 11:17:30 +03:00
print "State saved, quitting\n" ;
2019-03-14 19:04:47 +03:00
last ;
} elsif ( $ state - > { status } eq 'failed' && $ state - > { error } ) {
die "query-savevm failed with error '$state->{error}'\n"
} else {
die "query-savevm returned status '$state->{status}'\n" ;
}
}
} ;
my $ err = $@ ;
PVE::QemuConfig - > lock_config ( $ vmid , sub {
$ conf = PVE::QemuConfig - > load_config ( $ vmid ) ;
if ( $ err ) {
# cleanup, but leave suspending lock, to indicate something went wrong
eval {
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "savevm-end" ) ;
2019-03-14 19:04:47 +03:00
PVE::Storage:: deactivate_volumes ( $ storecfg , [ $ vmstate ] ) ;
PVE::Storage:: vdisk_free ( $ storecfg , $ vmstate ) ;
delete $ conf - > @ { qw( vmstate runningmachine ) } ;
PVE::QemuConfig - > write_config ( $ vmid , $ conf ) ;
} ;
warn $@ if $@ ;
die $ err ;
}
die "lock changed unexpectedly\n"
if ! PVE::QemuConfig - > has_lock ( $ conf , 'suspending' ) ;
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , "quit" ) ;
2019-03-14 19:04:47 +03:00
$ conf - > { lock } = 'suspended' ;
PVE::QemuConfig - > write_config ( $ vmid , $ conf ) ;
} ) ;
}
2011-08-23 09:47:04 +04:00
}
sub vm_resume {
2015-10-14 12:06:06 +03:00
my ( $ vmid , $ skiplock , $ nocheck ) = @ _ ;
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > lock_config ( $ vmid , sub {
2019-11-19 14:23:47 +03:00
my $ res = mon_cmd ( $ vmid , 'query-status' ) ;
2018-06-13 12:17:26 +03:00
my $ resume_cmd = 'cont' ;
if ( $ res - > { status } && $ res - > { status } eq 'suspended' ) {
$ resume_cmd = 'system_wakeup' ;
}
2015-10-14 12:06:06 +03:00
if ( ! $ nocheck ) {
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
my $ conf = PVE::QemuConfig - > load_config ( $ vmid ) ;
2011-08-23 09:47:04 +04:00
2016-03-11 14:11:57 +03:00
PVE::QemuConfig - > check_lock ( $ conf )
if ! ( $ skiplock || PVE::QemuConfig - > has_lock ( $ conf , 'backup' ) ) ;
2015-10-14 12:06:06 +03:00
}
2019-05-23 22:22:22 +03:00
2019-11-19 14:23:47 +03:00
mon_cmd ( $ vmid , $ resume_cmd ) ;
2011-08-23 09:47:04 +04:00
} ) ;
}
2011-10-10 15:17:40 +04:00
sub vm_sendkey {
my ( $ vmid , $ skiplock , $ key ) = @ _ ;
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > lock_config ( $ vmid , sub {
2011-08-23 09:47:04 +04:00
2016-03-07 14:41:12 +03:00
my $ conf = PVE::QemuConfig - > load_config ( $ vmid ) ;
2012-08-23 09:36:48 +04:00
2012-07-13 10:56:13 +04:00
# there is no qmp command, so we use the human monitor command
2019-11-19 14:23:47 +03:00
my $ res = PVE::QemuServer::Monitor:: hmp_cmd ( $ vmid , "sendkey $key" ) ;
2019-07-15 15:35:29 +03:00
die $ res if $ res ne '' ;
2011-08-23 09:47:04 +04:00
} ) ;
}
2011-10-17 15:49:48 +04:00
# vzdump restore implementation
2013-01-04 09:57:11 +04:00
sub tar_archive_read_firstfile {
2011-10-17 15:49:48 +04:00
my $ archive = shift ;
2012-01-27 12:35:26 +04:00
2011-10-17 15:49:48 +04:00
die "ERROR: file '$archive' does not exist\n" if ! - f $ archive ;
# try to detect archive type first
2016-06-09 17:54:46 +03:00
my $ pid = open ( my $ fh , '-|' , 'tar' , 'tf' , $ archive ) ||
2011-10-17 15:49:48 +04:00
die "unable to open file '$archive'\n" ;
2016-06-09 17:54:46 +03:00
my $ firstfile = <$fh> ;
2011-10-17 15:49:48 +04:00
kill 15 , $ pid ;
2016-06-09 17:54:46 +03:00
close $ fh ;
2011-10-17 15:49:48 +04:00
die "ERROR: archive contaions no data\n" if ! $ firstfile ;
chomp $ firstfile ;
return $ firstfile ;
}
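# Example (archive path hypothetical): for a valid vzdump tar backup the
# first archive member is the VM config, so
#   tar_archive_read_firstfile('/var/lib/vz/dump/vzdump-qemu-105.tar')
# is expected to return 'qemu-server.conf' (see restore_tar_archive below).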
2013-01-04 09:57:11 +04:00
sub tar_restore_cleanup {
my ( $ storecfg , $ statfile ) = @ _ ;
2011-10-17 15:49:48 +04:00
print STDERR "starting cleanup\n" ;
if ( my $ fd = IO::File - > new ( $ statfile , "r" ) ) {
while ( defined ( my $ line = <$fd> ) ) {
if ( $ line =~ m/vzdump:([^\s:]*):(\S+)$/ ) {
my $ volid = $ 2 ;
eval {
if ( $ volid =~ m | ^ / | ) {
unlink ( $ volid ) || die "unlink failed\n" ;
} else {
2013-01-04 09:57:11 +04:00
PVE::Storage:: vdisk_free ( $ storecfg , $ volid ) ;
2011-10-17 15:49:48 +04:00
}
2012-01-27 12:35:26 +04:00
print STDERR "temporary volume '$volid' sucessfuly removed\n" ;
2011-10-17 15:49:48 +04:00
} ;
print STDERR "unable to cleanup '$volid' - $@" if $@ ;
} else {
print STDERR "unable to parse line in statfile - $line" ;
2012-01-27 12:35:26 +04:00
}
2011-10-17 15:49:48 +04:00
}
$ fd - > close ( ) ;
}
}
sub restore_archive {
2012-02-02 09:39:38 +04:00
my ( $ archive , $ vmid , $ user , $ opts ) = @ _ ;
2011-10-17 15:49:48 +04:00
2012-12-12 18:35:26 +04:00
my $ format = $ opts - > { format } ;
my $ comp ;
if ( $ archive =~ m/\.tgz$/ || $ archive =~ m/\.tar\.gz$/ ) {
$ format = 'tar' if ! $ format ;
$ comp = 'gzip' ;
} elsif ( $ archive =~ m/\.tar$/ ) {
$ format = 'tar' if ! $ format ;
} elsif ( $ archive =~ m/.tar.lzo$/ ) {
$ format = 'tar' if ! $ format ;
$ comp = 'lzop' ;
} elsif ( $ archive =~ m/\.vma$/ ) {
$ format = 'vma' if ! $ format ;
} elsif ( $ archive =~ m/\.vma\.gz$/ ) {
$ format = 'vma' if ! $ format ;
$ comp = 'gzip' ;
} elsif ( $ archive =~ m/\.vma\.lzo$/ ) {
$ format = 'vma' if ! $ format ;
$ comp = 'lzop' ;
} else {
$ format = 'vma' if ! $ format ; # default
}
# dispatch to the restore implementation matching the detected format
if ( $ format eq 'tar' ) {
return restore_tar_archive ( $ archive , $ vmid , $ user , $ opts ) ;
} else {
return restore_vma_archive ( $ archive , $ vmid , $ user , $ opts , $ comp ) ;
}
}
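# Usage sketch (illustrative, path and options hypothetical):
#
#   restore_archive('/var/lib/vz/dump/vzdump-qemu-105.vma.lzo', 105, 'root@pam',
#       { storage => 'local-lvm', unique => 1 });
#
# The '.vma.lzo' suffix selects the vma restore path with lzop decompression.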
sub restore_update_config_line {
my ( $ outfd , $ cookie , $ vmid , $ map , $ line , $ unique ) = @ _ ;
return if $ line =~ m/^\#qmdump\#/ ;
return if $ line =~ m/^\#vzdump\#/ ;
return if $ line =~ m/^lock:/ ;
return if $ line =~ m/^unused\d+:/ ;
return if $ line =~ m/^parent:/ ;
2016-07-13 17:25:44 +03:00
my $ dc = PVE::Cluster:: cfs_read_file ( 'datacenter.cfg' ) ;
2012-12-12 18:35:26 +04:00
if ( ( $ line =~ m/^(vlan(\d+)):\s*(\S+)\s*$/ ) ) {
# try to convert old 1.X settings
my ( $ id , $ ind , $ ethcfg ) = ( $ 1 , $ 2 , $ 3 ) ;
foreach my $ devconfig ( PVE::Tools:: split_list ( $ ethcfg ) ) {
my ( $ model , $ macaddr ) = split ( /\=/ , $ devconfig ) ;
2016-07-13 17:25:44 +03:00
$ macaddr = PVE::Tools:: random_ether_addr ( $ dc - > { mac_prefix } ) if ! $ macaddr || $ unique ;
2012-12-12 18:35:26 +04:00
my $ net = {
model = > $ model ,
bridge = > "vmbr$ind" ,
macaddr = > $ macaddr ,
} ;
my $ netstr = print_net ( $ net ) ;
print $ outfd "net$cookie->{netcount}: $netstr\n" ;
$ cookie - > { netcount } + + ;
}
} elsif ( ( $ line =~ m/^(net\d+):\s*(\S+)\s*$/ ) && $ unique ) {
my ( $ id , $ netstr ) = ( $ 1 , $ 2 ) ;
my $ net = parse_net ( $ netstr ) ;
2016-07-13 17:25:44 +03:00
$ net - > { macaddr } = PVE::Tools:: random_ether_addr ( $ dc - > { mac_prefix } ) if $ net - > { macaddr } ;
2012-12-12 18:35:26 +04:00
$ netstr = print_net ( $ net ) ;
print $ outfd "$id: $netstr\n" ;
2016-09-08 12:02:59 +03:00
} elsif ( $ line =~ m/^((ide|scsi|virtio|sata|efidisk)\d+):\s*(\S+)\s*$/ ) {
2012-12-12 18:35:26 +04:00
my $ virtdev = $ 1 ;
2013-01-07 09:49:11 +04:00
my $ value = $ 3 ;
2016-02-10 16:28:18 +03:00
my $ di = parse_drive ( $ virtdev , $ value ) ;
if ( defined ( $ di - > { backup } ) && ! $ di - > { backup } ) {
2012-12-12 18:35:26 +04:00
print $ outfd "#$line" ;
2016-02-10 16:28:19 +03:00
} elsif ( $ map - > { $ virtdev } ) {
2013-05-21 14:02:41 +04:00
delete $ di - > { format } ; # format can change on restore
2012-12-12 18:35:26 +04:00
$ di - > { file } = $ map - > { $ virtdev } ;
2019-12-05 18:11:01 +03:00
$ value = print_drive ( $ di ) ;
2012-12-12 18:35:26 +04:00
print $ outfd "$virtdev: $value\n" ;
} else {
print $ outfd $ line ;
}
2018-09-19 14:31:19 +03:00
} elsif ( ( $ line =~ m/^vmgenid: (.*)/ ) ) {
2018-09-19 15:15:56 +03:00
my $ vmgenid = $ 1 ;
2018-09-19 12:35:11 +03:00
if ( $ vmgenid ne '0' ) {
2018-09-19 14:31:19 +03:00
# always generate a new vmgenid if there was a valid one setup
2018-09-19 12:35:11 +03:00
$ vmgenid = generate_uuid ( ) ;
}
2018-09-19 14:31:19 +03:00
print $ outfd "vmgenid: $vmgenid\n" ;
2018-02-01 16:51:05 +03:00
} elsif ( ( $ line =~ m/^(smbios1: )(.*)/ ) && $ unique ) {
my ( $ uuid , $ uuid_str ) ;
UUID:: generate ( $ uuid ) ;
UUID:: unparse ( $ uuid , $ uuid_str ) ;
my $ smbios1 = parse_smbios1 ( $ 2 ) ;
$ smbios1 - > { uuid } = $ uuid_str ;
print $ outfd $ 1 . print_smbios1 ( $ smbios1 ) . "\n" ;
2012-12-12 18:35:26 +04:00
} else {
print $ outfd $ line ;
}
}
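# Illustrative transformation (MAC address hypothetical): with $unique set,
#   net0: virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0
# is re-emitted with a freshly randomized MAC, while 'lock:', 'parent:' and
# 'unusedN:' lines are dropped entirely (see the early returns above).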
sub scan_volids {
my ( $ cfg , $ vmid ) = @ _ ;
my $ info = PVE::Storage:: vdisk_list ( $ cfg , undef , $ vmid ) ;
my $ volid_hash = { } ;
foreach my $ storeid ( keys %$ info ) {
foreach my $ item ( @ { $ info - > { $ storeid } } ) {
next if ! ( $ item - > { volid } && $ item - > { size } ) ;
2013-05-27 10:25:39 +04:00
$ item - > { path } = PVE::Storage:: path ( $ cfg , $ item - > { volid } ) ;
2012-12-12 18:35:26 +04:00
$ volid_hash - > { $ item - > { volid } } = $ item ;
}
}
return $ volid_hash ;
}
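# Shape of the returned hash (values hypothetical):
#
#   {
#       'local-lvm:vm-105-disk-0' => {
#           volid => 'local-lvm:vm-105-disk-0',
#           size  => 34359738368,
#           vmid  => 105,
#           path  => '/dev/pve/vm-105-disk-0',
#       },
#   }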
2019-12-09 16:08:09 +03:00
sub update_disk_config {
2012-12-12 18:35:26 +04:00
my ( $ vmid , $ conf , $ volid_hash ) = @ _ ;
2013-07-15 11:13:31 +04:00
2012-12-12 18:35:26 +04:00
my $ changes ;
2018-07-11 09:40:06 +03:00
my $ prefix = "VM $vmid:" ;
2012-12-12 18:35:26 +04:00
2017-11-16 11:20:56 +03:00
# used and unused disks
my $ referenced = { } ;
2012-12-12 18:35:26 +04:00
2013-05-27 10:25:39 +04:00
# Note: it is allowed to define multiple storages with the same path (alias), so
# we need to check both the 'volid' and the real 'path' (two different volids can
# point to the same path).
2017-11-16 11:20:56 +03:00
my $ referencedpath = { } ;
2013-07-15 11:13:31 +04:00
2012-12-12 18:35:26 +04:00
# update size info
foreach my $ opt ( keys %$ conf ) {
2016-03-03 17:45:15 +03:00
if ( is_valid_drivename ( $ opt ) ) {
2013-01-04 09:57:11 +04:00
my $ drive = parse_drive ( $ opt , $ conf - > { $ opt } ) ;
2012-12-12 18:35:26 +04:00
my $ volid = $ drive - > { file } ;
next if ! $ volid ;
2019-12-09 16:08:09 +03:00
# mark volid as "in-use" for next step
2017-11-16 11:20:56 +03:00
$ referenced - > { $ volid } = 1 ;
2013-07-15 11:13:31 +04:00
if ( $ volid_hash - > { $ volid } &&
2013-05-27 10:25:39 +04:00
( my $ path = $ volid_hash - > { $ volid } - > { path } ) ) {
2017-11-16 11:20:56 +03:00
$ referencedpath - > { $ path } = 1 ;
2013-05-27 10:25:39 +04:00
}
2012-12-12 18:35:26 +04:00
2013-01-04 09:57:11 +04:00
next if drive_is_cdrom ( $ drive ) ;
2012-12-12 18:35:26 +04:00
next if ! $ volid_hash - > { $ volid } ;
2020-03-02 13:33:44 +03:00
my ( $ updated , $ old_size , $ new_size ) = PVE::QemuServer::Drive:: update_disksize ( $ drive , $ volid_hash ) ;
2019-12-09 16:08:09 +03:00
if ( defined ( $ updated ) ) {
2013-05-27 11:30:40 +04:00
$ changes = 1 ;
2019-12-09 16:08:09 +03:00
$ conf - > { $ opt } = print_drive ( $ updated ) ;
print "$prefix size of disk '$volid' ($opt) updated from $old_size to $new_size\n" ;
2013-05-27 11:30:40 +04:00
}
2012-12-12 18:35:26 +04:00
}
}
2013-05-27 10:25:39 +04:00
# remove 'unusedX' entry if volume is used
foreach my $ opt ( keys %$ conf ) {
next if $ opt !~ m/^unused\d+$/ ;
my $ volid = $ conf - > { $ opt } ;
my $ path = $ volid_hash - > { $ volid } ? $ volid_hash - > { $ volid } - > { path } : undef ;
2017-11-16 11:20:56 +03:00
if ( $ referenced - > { $ volid } || ( $ path && $ referencedpath - > { $ path } ) ) {
2019-12-09 16:08:09 +03:00
print "$prefix remove entry '$opt', its volume '$volid' is in use\n" ;
2013-05-27 10:25:39 +04:00
$ changes = 1 ;
delete $ conf - > { $ opt } ;
}
2017-11-16 11:20:56 +03:00
$ referenced - > { $ volid } = 1 ;
$ referencedpath - > { $ path } = 1 if $ path ;
2013-05-27 10:25:39 +04:00
}
2012-12-12 18:35:26 +04:00
foreach my $ volid ( sort keys %$ volid_hash ) {
next if $ volid =~ m/vm-$vmid-state-/ ;
2017-11-16 11:20:56 +03:00
next if $ referenced - > { $ volid } ;
2013-05-27 10:25:39 +04:00
my $ path = $ volid_hash - > { $ volid } - > { path } ;
next if ! $ path ; # just to be sure
2017-11-16 11:20:56 +03:00
next if $ referencedpath - > { $ path } ;
2012-12-12 18:35:26 +04:00
$ changes = 1 ;
2018-07-11 09:40:06 +03:00
my $ key = PVE::QemuConfig - > add_unused_volume ( $ conf , $ volid ) ;
2019-12-09 16:08:09 +03:00
print "$prefix add unreferenced volume '$volid' as '$key' to config\n" ;
2017-11-16 11:20:56 +03:00
$ referencedpath - > { $ path } = 1 ; # avoid to add more than once (aliases)
2012-12-12 18:35:26 +04:00
}
return $ changes ;
}
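# In short, update_disk_config makes three passes: refresh size info for all
# referenced drives, drop 'unusedN' entries whose volume is referenced again,
# and add any remaining owned-but-unreferenced volume as a new 'unusedN' entry.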
sub rescan {
2018-07-11 09:40:07 +03:00
my ( $ vmid , $ nolock , $ dryrun ) = @ _ ;
2012-12-12 18:35:26 +04:00
2016-03-25 16:01:36 +03:00
my $ cfg = PVE::Storage:: config ( ) ;
2012-12-12 18:35:26 +04:00
2018-07-05 09:46:11 +03:00
# FIXME: Remove once our RBD plugin can handle CT and VM on a single storage
# see: https://pve.proxmox.com/pipermail/pve-devel/2018-July/032900.html
2018-07-03 18:41:09 +03:00
foreach my $ stor ( keys % { $ cfg - > { ids } } ) {
delete ( $ cfg - > { ids } - > { $ stor } ) if ! $ cfg - > { ids } - > { $ stor } - > { content } - > { images } ;
}
2018-07-11 09:40:06 +03:00
print "rescan volumes...\n" ;
2012-12-12 18:35:26 +04:00
my $ volid_hash = scan_volids ( $ cfg , $ vmid ) ;
my $ updatefn = sub {
my ( $ vmid ) = @ _ ;
2016-03-07 14:41:12 +03:00
my $ conf = PVE::QemuConfig - > load_config ( $ vmid ) ;
2013-07-15 11:13:31 +04:00
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > check_lock ( $ conf ) ;
2012-12-12 18:35:26 +04:00
2013-04-19 12:45:46 +04:00
my $ vm_volids = { } ;
foreach my $ volid ( keys %$ volid_hash ) {
my $ info = $ volid_hash - > { $ volid } ;
$ vm_volids - > { $ volid } = $ info if $ info - > { vmid } && $ info - > { vmid } == $ vmid ;
}
2019-12-09 16:08:09 +03:00
my $ changes = update_disk_config ( $ vmid , $ conf , $ vm_volids ) ;
2012-12-12 18:35:26 +04:00
2018-07-11 09:40:07 +03:00
PVE::QemuConfig - > write_config ( $ vmid , $ conf ) if $ changes && ! $ dryrun ;
2012-12-12 18:35:26 +04:00
} ;
if ( defined ( $ vmid ) ) {
if ( $ nolock ) {
& $ updatefn ( $ vmid ) ;
} else {
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > lock_config ( $ vmid , $ updatefn , $ vmid ) ;
2012-12-12 18:35:26 +04:00
}
} else {
my $ vmlist = config_list ( ) ;
foreach my $ vmid ( keys %$ vmlist ) {
if ( $ nolock ) {
& $ updatefn ( $ vmid ) ;
} else {
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > lock_config ( $ vmid , $ updatefn , $ vmid ) ;
2013-07-15 11:13:31 +04:00
}
2012-12-12 18:35:26 +04:00
}
}
}
sub restore_vma_archive {
my ( $ archive , $ vmid , $ user , $ opts , $ comp ) = @ _ ;
my $ readfrom = $ archive ;
2018-02-22 19:15:24 +03:00
my $ cfg = PVE::Storage:: config ( ) ;
my $ commands = [] ;
my $ bwlimit = $ opts - > { bwlimit } ;
my $ dbg_cmdstring = '' ;
my $ add_pipe = sub {
my ( $ cmd ) = @ _ ;
push @$ commands , $ cmd ;
$ dbg_cmdstring . = ' | ' if length ( $ dbg_cmdstring ) ;
$ dbg_cmdstring . = PVE::Tools:: cmd2string ( $ cmd ) ;
2012-12-12 18:35:26 +04:00
$ readfrom = '-' ;
2018-02-22 19:15:24 +03:00
} ;
my $ input = undef ;
if ( $ archive eq '-' ) {
$ input = '<&STDIN' ;
} else {
# If we use a backup from a PVE defined storage we also consider that
# storage's rate limit:
my ( undef , $ volid ) = PVE::Storage:: path_to_volume_id ( $ cfg , $ archive ) ;
if ( defined ( $ volid ) ) {
my ( $ sid , undef ) = PVE::Storage:: parse_volume_id ( $ volid ) ;
my $ readlimit = PVE::Storage:: get_bandwidth_limit ( 'restore' , [ $ sid ] , $ bwlimit ) ;
if ( $ readlimit ) {
print STDERR "applying read rate limit: $readlimit\n" ;
2018-03-21 13:12:26 +03:00
my $ cstream = [ 'cstream' , '-t' , $ readlimit * 1024 , '--' , $ readfrom ] ;
2018-02-22 19:15:24 +03:00
$ add_pipe - > ( $ cstream ) ;
}
}
}
if ( $ comp ) {
my $ cmd ;
2012-12-12 18:35:26 +04:00
if ( $ comp eq 'gzip' ) {
2018-02-22 19:15:24 +03:00
$ cmd = [ 'zcat' , $ readfrom ] ;
2012-12-12 18:35:26 +04:00
} elsif ( $ comp eq 'lzop' ) {
2018-02-22 19:15:24 +03:00
$ cmd = [ 'lzop' , '-d' , '-c' , $ readfrom ] ;
2012-12-12 18:35:26 +04:00
} else {
die "unknown compression method '$comp'\n" ;
}
2018-02-22 19:15:24 +03:00
$ add_pipe - > ( $ cmd ) ;
2012-12-12 18:35:26 +04:00
}
my $ tmpdir = "/var/tmp/vzdumptmp$$" ;
rmtree $ tmpdir ;
# disable interrupts (always do cleanups)
2017-09-06 14:29:07 +03:00
local $ SIG { INT } =
local $ SIG { TERM } =
local $ SIG { QUIT } =
local $ SIG { HUP } = sub { warn "got interrupt - ignored\n" ; } ;
2012-12-12 18:35:26 +04:00
my $ mapfifo = "/var/tmp/vzdumptmp$$.fifo" ;
POSIX:: mkfifo ( $ mapfifo , 0600 ) ;
my $ fifofh ;
my $ openfifo = sub {
open ( $ fifofh , '>' , $ mapfifo ) || die $! ;
} ;
2018-02-22 19:15:24 +03:00
$ add_pipe - > ( [ 'vma' , 'extract' , '-v' , '-r' , $ mapfifo , $ readfrom , $ tmpdir ] ) ;
2012-12-12 18:35:26 +04:00
my $ oldtimeout ;
my $ timeout = 5 ;
my $ devinfo = { } ;
my $ rpcenv = PVE::RPCEnvironment:: get ( ) ;
2016-03-07 14:41:12 +03:00
my $ conffile = PVE::QemuConfig - > config_file ( $ vmid ) ;
2012-12-12 18:35:26 +04:00
my $ tmpfn = "$conffile.$$.tmp" ;
2019-12-13 14:11:58 +03:00
# Note: $oldconf is undef if VM does not exist
2016-03-07 14:41:12 +03:00
my $ cfs_path = PVE::QemuConfig - > cfs_config_path ( $ vmid ) ;
my $ oldconf = PVE::Cluster:: cfs_read_file ( $ cfs_path ) ;
2013-01-04 09:57:11 +04:00
2018-02-22 19:15:24 +03:00
my % storage_limits ;
2012-12-12 18:35:26 +04:00
my $ print_devmap = sub {
my $ virtdev_hash = { } ;
my $ cfgfn = "$tmpdir/qemu-server.conf" ;
# we can read the config - that is already extracted
my $ fh = IO::File - > new ( $ cfgfn , "r" ) ||
"unable to read qemu-server.conf - $!\n" ;
2015-11-25 12:20:04 +03:00
my $ fwcfgfn = "$tmpdir/qemu-server.fw" ;
2016-01-28 11:00:41 +03:00
if ( - f $ fwcfgfn ) {
my $ pve_firewall_dir = '/etc/pve/firewall' ;
mkdir $ pve_firewall_dir ; # make sure the dir exists
PVE::Tools:: file_copy ( $ fwcfgfn , "${pve_firewall_dir}/$vmid.fw" ) ;
}
2015-11-25 12:20:04 +03:00
2012-12-12 18:35:26 +04:00
while ( defined ( my $ line = <$fh> ) ) {
if ( $ line =~ m/^\#qmdump\#map:(\S+):(\S+):(\S*):(\S*):$/ ) {
my ( $ virtdev , $ devname , $ storeid , $ format ) = ( $ 1 , $ 2 , $ 3 , $ 4 ) ;
die "archive does not contain data for drive '$virtdev'\n"
if ! $ devinfo - > { $ devname } ;
if ( defined ( $ opts - > { storage } ) ) {
$ storeid = $ opts - > { storage } || 'local' ;
} elsif ( ! $ storeid ) {
$ storeid = 'local' ;
}
$ format = 'raw' if ! $ format ;
$ devinfo - > { $ devname } - > { devname } = $ devname ;
$ devinfo - > { $ devname } - > { virtdev } = $ virtdev ;
$ devinfo - > { $ devname } - > { format } = $ format ;
$ devinfo - > { $ devname } - > { storeid } = $ storeid ;
2013-07-15 11:13:31 +04:00
# check permission on storage
2012-12-12 18:35:26 +04:00
my $ pool = $ opts - > { pool } ; # todo: do we need that?
if ( $ user ne 'root@pam' ) {
$ rpcenv - > check ( $ user , "/storage/$storeid" , [ 'Datastore.AllocateSpace' ] ) ;
}
2018-02-22 19:15:24 +03:00
$ storage_limits { $ storeid } = $ bwlimit ;
2012-12-12 18:35:26 +04:00
$ virtdev_hash - > { $ virtdev } = $ devinfo - > { $ devname } ;
2019-05-16 15:07:01 +03:00
} elsif ( $ line =~ m/^((?:ide|sata|scsi)\d+):\s*(.*)\s*$/ ) {
my $ virtdev = $ 1 ;
my $ drive = parse_drive ( $ virtdev , $ 2 ) ;
if ( drive_is_cloudinit ( $ drive ) ) {
my ( $ storeid , $ volname ) = PVE::Storage:: parse_volume_id ( $ drive - > { file } ) ;
my $ scfg = PVE::Storage:: storage_config ( $ cfg , $ storeid ) ;
my $ format = qemu_img_format ( $ scfg , $ volname ) ; # has 'raw' fallback
my $ d = {
format = > $ format ,
storeid = > $ opts - > { storage } // $ storeid ,
size = > PVE::QemuServer::Cloudinit:: CLOUDINIT_DISK_SIZE ,
file = > $ drive - > { file } , # to make drive_is_cloudinit check possible
name = > "vm-$vmid-cloudinit" ,
2019-05-17 11:53:30 +03:00
is_cloudinit = > 1 ,
2019-05-16 15:07:01 +03:00
} ;
$ virtdev_hash - > { $ virtdev } = $ d ;
}
2012-12-12 18:35:26 +04:00
}
}
2018-02-22 19:15:24 +03:00
foreach my $ key ( keys % storage_limits ) {
my $ limit = PVE::Storage:: get_bandwidth_limit ( 'restore' , [ $ key ] , $ bwlimit ) ;
next if ! $ limit ;
print STDERR "rate limit for storage $key: $limit KiB/s\n" ;
$ storage_limits { $ key } = $ limit * 1024 ;
}
2012-12-12 18:35:26 +04:00
foreach my $ devname ( keys %$ devinfo ) {
2013-07-15 11:13:31 +04:00
die "found no device mapping information for device '$devname'\n"
if ! $ devinfo - > { $ devname } - > { virtdev } ;
2012-12-12 18:35:26 +04:00
}
2013-01-04 09:57:11 +04:00
# create empty/temp config
2013-07-15 11:13:31 +04:00
if ( $ oldconf ) {
2013-01-04 09:57:11 +04:00
PVE::Tools:: file_set_contents ( $ conffile , "memory: 128\n" ) ;
foreach_drive ( $ oldconf , sub {
my ( $ ds , $ drive ) = @ _ ;
2019-09-25 16:30:12 +03:00
return if drive_is_cdrom ( $ drive , 1 ) ;
2013-01-04 09:57:11 +04:00
my $ volid = $ drive - > { file } ;
return if ! $ volid || $ volid =~ m | ^ / | ;
my ( $ path , $ owner ) = PVE::Storage:: path ( $ cfg , $ volid ) ;
return if ! $ path || ! $ owner || ( $ owner != $ vmid ) ;
# Note: only delete disk we want to restore
# other volumes will become unused
if ( $ virtdev_hash - > { $ ds } ) {
2016-09-15 14:24:35 +03:00
eval { PVE::Storage:: vdisk_free ( $ cfg , $ volid ) ; } ;
if ( my $ err = $@ ) {
warn $ err ;
}
2013-01-04 09:57:11 +04:00
}
} ) ;
2016-02-16 12:05:41 +03:00
2019-05-17 11:53:50 +03:00
# delete vmstate files, after the restore we have no snapshots anymore
2016-02-16 12:05:41 +03:00
foreach my $ snapname ( keys % { $ oldconf - > { snapshots } } ) {
my $ snap = $ oldconf - > { snapshots } - > { $ snapname } ;
if ( $ snap - > { vmstate } ) {
eval { PVE::Storage:: vdisk_free ( $ cfg , $ snap - > { vmstate } ) ; } ;
if ( my $ err = $@ ) {
warn $ err ;
}
}
}
2013-01-04 09:57:11 +04:00
}
my $ map = { } ;
2012-12-12 18:35:26 +04:00
foreach my $ virtdev ( sort keys %$ virtdev_hash ) {
my $ d = $ virtdev_hash - > { $ virtdev } ;
my $ alloc_size = int ( ( $ d - > { size } + 1024 - 1 ) / 1024 ) ;
2018-02-22 19:15:24 +03:00
my $ storeid = $ d - > { storeid } ;
my $ scfg = PVE::Storage:: storage_config ( $ cfg , $ storeid ) ;
my $ map_opts = '' ;
if ( my $ limit = $ storage_limits { $ storeid } ) {
$ map_opts . = "throttling.bps=$limit:throttling.group=$storeid:" ;
}
2013-05-21 14:02:41 +04:00
# test if requested format is supported
2018-02-22 19:15:24 +03:00
my ( $ defFormat , $ validFormats ) = PVE::Storage:: storage_default_format ( $ cfg , $ storeid ) ;
2013-05-21 14:02:41 +04:00
my $ supported = grep { $ _ eq $ d - > { format } } @$ validFormats ;
$ d - > { format } = $ defFormat if ! $ supported ;
2019-05-17 11:53:30 +03:00
my $ name ;
if ( $ d - > { is_cloudinit } ) {
$ name = $ d - > { name } ;
$ name . = ".$d->{format}" if $ d - > { format } ne 'raw' ;
2019-05-16 15:07:01 +03:00
}
2019-05-17 11:53:50 +03:00
my $ volid = PVE::Storage:: vdisk_alloc ( $ cfg , $ storeid , $ vmid , $ d - > { format } , $ name , $ alloc_size ) ;
2012-12-12 18:35:26 +04:00
print STDERR "new volume ID is '$volid'\n" ;
$ d - > { volid } = $ volid ;
2019-05-17 11:53:50 +03:00
PVE::Storage:: activate_volumes ( $ cfg , [ $ volid ] ) ;
2015-09-08 17:05:21 +03:00
2012-12-12 18:35:26 +04:00
my $ write_zeros = 1 ;
2016-02-23 14:43:51 +03:00
if ( PVE::Storage:: volume_has_feature ( $ cfg , 'sparseinit' , $ volid ) ) {
2012-12-12 18:35:26 +04:00
$ write_zeros = 0 ;
}
2019-05-17 11:53:30 +03:00
if ( ! $ d - > { is_cloudinit } ) {
my $ path = PVE::Storage:: path ( $ cfg , $ volid ) ;
2019-05-16 15:07:01 +03:00
print $ fifofh "${map_opts}format=$d->{format}:${write_zeros}:$d->{devname}=$path\n" ;
2012-12-12 18:35:26 +04:00
2019-05-16 15:07:01 +03:00
print "map '$d->{devname}' to '$path' (write zeros = ${write_zeros})\n" ;
}
2012-12-12 18:35:26 +04:00
$ map - > { $ virtdev } = $ volid ;
}
$ fh - > seek ( 0 , 0 ) || die "seek failed - $!\n" ;
my $ outfd = new IO:: File ( $ tmpfn , "w" ) ||
die "unable to write config for VM $vmid\n" ;
my $ cookie = { netcount = > 0 } ;
while ( defined ( my $ line = <$fh> ) ) {
2013-07-15 11:13:31 +04:00
restore_update_config_line ( $ outfd , $ cookie , $ vmid , $ map , $ line , $ opts - > { unique } ) ;
2012-12-12 18:35:26 +04:00
}
$ fh - > close ( ) ;
$ outfd - > close ( ) ;
} ;
eval {
# enable interrupts
2017-09-14 16:19:39 +03:00
local $ SIG { INT } =
local $ SIG { TERM } =
local $ SIG { QUIT } =
local $ SIG { HUP } =
local $ SIG { PIPE } = sub { die "interrupted by signal\n" ; } ;
2012-12-12 18:35:26 +04:00
local $ SIG { ALRM } = sub { die "got timeout\n" ; } ;
$ oldtimeout = alarm ( $ timeout ) ;
my $ parser = sub {
my $ line = shift ;
print "$line\n" ;
if ( $ line =~ m/^DEV:\sdev_id=(\d+)\ssize:\s(\d+)\sdevname:\s(\S+)$/ ) {
my ( $ dev_id , $ size , $ devname ) = ( $ 1 , $ 2 , $ 3 ) ;
$ devinfo - > { $ devname } = { size = > $ size , dev_id = > $ dev_id } ;
} elsif ( $ line =~ m/^CTIME: / ) {
2014-05-17 11:07:18 +04:00
# we correctly received the vma config, so we can disable
2014-04-17 12:37:46 +04:00
# the timeout now for disk allocation (set to 10 minutes, so
# that we always timeout if something goes wrong)
alarm ( 600 ) ;
2012-12-12 18:35:26 +04:00
& $ print_devmap ( ) ;
print $ fifofh "done\n" ;
my $ tmp = $ oldtimeout || 0 ;
$ oldtimeout = undef ;
alarm ( $ tmp ) ;
close ( $ fifofh ) ;
}
} ;
2013-07-15 11:13:31 +04:00
2018-02-22 19:15:24 +03:00
print "restore vma archive: $dbg_cmdstring\n" ;
run_command ( $ commands , input = > $ input , outfunc = > $ parser , afterfork = > $ openfifo ) ;
2012-12-12 18:35:26 +04:00
} ;
my $ err = $@ ;
alarm ( $ oldtimeout ) if $ oldtimeout ;
2015-09-08 17:05:21 +03:00
my $ vollist = [] ;
foreach my $ devname ( keys %$ devinfo ) {
my $ volid = $ devinfo - > { $ devname } - > { volid } ;
push @$ vollist , $ volid if $ volid ;
}
PVE::Storage:: deactivate_volumes ( $ cfg , $ vollist ) ;
2012-12-12 18:35:26 +04:00
unlink $ mapfifo ;
if ( $ err ) {
rmtree $ tmpdir ;
unlink $ tmpfn ;
foreach my $ devname ( keys %$ devinfo ) {
my $ volid = $ devinfo - > { $ devname } - > { volid } ;
next if ! $ volid ;
eval {
if ( $ volid =~ m | ^ / | ) {
unlink ( $ volid ) || die "unlink failed\n" ;
} else {
PVE::Storage:: vdisk_free ( $ cfg , $ volid ) ;
}
print STDERR "temporary volume '$volid' sucessfuly removed\n" ;
} ;
print STDERR "unable to cleanup '$volid' - $@" if $@ ;
}
die $ err ;
}
rmtree $ tmpdir ;
2013-01-04 09:57:11 +04:00
rename ( $ tmpfn , $ conffile ) ||
2012-12-12 18:35:26 +04:00
die "unable to commit configuration file '$conffile'\n" ;
2013-01-04 09:57:11 +04:00
PVE::Cluster:: cfs_update ( ) ; # make sure we read new file
2012-12-12 18:35:26 +04:00
eval { rescan ( $ vmid , 1 ) ; } ;
warn $@ if $@ ;
}
sub restore_tar_archive {
my ( $ archive , $ vmid , $ user , $ opts ) = @ _ ;
2011-10-19 13:27:42 +04:00
if ( $ archive ne '-' ) {
2013-01-04 09:57:11 +04:00
my $ firstfile = tar_archive_read_firstfile ( $ archive ) ;
2011-10-19 13:27:42 +04:00
die "ERROR: file '$archive' dos not lock like a QemuServer vzdump backup\n"
if $ firstfile ne 'qemu-server.conf' ;
}
2011-10-17 15:49:48 +04:00
2016-03-25 16:01:36 +03:00
my $ storecfg = PVE::Storage:: config ( ) ;
2013-01-28 12:54:00 +04:00
2019-11-08 17:02:50 +03:00
# avoid zombie disks when restoring over an existing VM -> cleanup first
# pass keep_empty_config=1 to keep the config (thus VMID) reserved for us
# skiplock=1 because qmrestore has set the 'create' lock itself already
2016-03-07 14:41:12 +03:00
my $ vmcfgfn = PVE::QemuConfig - > config_file ( $ vmid ) ;
2019-11-08 19:03:28 +03:00
destroy_vm ( $ storecfg , $ vmid , 1 , { lock = > 'restore' } ) if - f $ vmcfgfn ;
2013-01-04 09:57:11 +04:00
2011-10-17 15:49:48 +04:00
my $ tocmd = "/usr/lib/qemu-server/qmextract" ;
2011-10-25 13:37:56 +04:00
$ tocmd . = " --storage " . PVE::Tools:: shellquote ( $ opts - > { storage } ) if $ opts - > { storage } ;
2012-02-02 09:39:38 +04:00
$ tocmd . = " --pool " . PVE::Tools:: shellquote ( $ opts - > { pool } ) if $ opts - > { pool } ;
2011-10-17 15:49:48 +04:00
$ tocmd . = ' --prealloc' if $ opts - > { prealloc } ;
$ tocmd . = ' --info' if $ opts - > { info } ;
2012-02-02 09:39:38 +04:00
# tar option "xf" does not autodetect compression when read from STDIN,
2011-10-19 13:27:42 +04:00
# so we pipe to zcat
2011-10-25 13:37:56 +04:00
my $ cmd = "zcat -f|tar xf " . PVE::Tools:: shellquote ( $ archive ) . " " .
PVE::Tools:: shellquote ( "--to-command=$tocmd" ) ;
2011-10-17 15:49:48 +04:00
my $ tmpdir = "/var/tmp/vzdumptmp$$" ;
mkpath $ tmpdir ;
local $ ENV { VZDUMP_TMPDIR } = $ tmpdir ;
local $ ENV { VZDUMP_VMID } = $ vmid ;
2012-02-02 09:39:38 +04:00
local $ ENV { VZDUMP_USER } = $ user ;
2011-10-17 15:49:48 +04:00
2016-03-07 14:41:12 +03:00
my $ conffile = PVE::QemuConfig - > config_file ( $ vmid ) ;
2011-10-17 15:49:48 +04:00
my $ tmpfn = "$conffile.$$.tmp" ;
# disable interrupts (always do cleanups)
2017-09-14 16:19:39 +03:00
local $ SIG { INT } =
local $ SIG { TERM } =
local $ SIG { QUIT } =
local $ SIG { HUP } = sub { print STDERR "got interrupt - ignored\n" ; } ;
2011-10-17 15:49:48 +04:00
2012-01-27 12:35:26 +04:00
eval {
2011-10-17 15:49:48 +04:00
# enable interrupts
2017-09-14 16:19:39 +03:00
local $ SIG { INT } =
local $ SIG { TERM } =
local $ SIG { QUIT } =
local $ SIG { HUP } =
local $ SIG { PIPE } = sub { die "interrupted by signal\n" ; } ;
2011-10-17 15:49:48 +04:00
2011-10-19 13:27:42 +04:00
if ( $ archive eq '-' ) {
print "extracting archive from STDIN\n" ;
run_command ( $ cmd , input = > "<&STDIN" ) ;
} else {
print "extracting archive '$archive'\n" ;
run_command ( $ cmd ) ;
}
2011-10-17 15:49:48 +04:00
return if $ opts - > { info } ;
# read new mapping
my $ map = { } ;
my $ statfile = "$tmpdir/qmrestore.stat" ;
if ( my $ fd = IO::File - > new ( $ statfile , "r" ) ) {
while ( defined ( my $ line = <$fd> ) ) {
if ( $ line =~ m/vzdump:([^\s:]*):(\S+)$/ ) {
$ map - > { $ 1 } = $ 2 if $ 1 ;
} else {
print STDERR "unable to parse line in statfile - $line\n" ;
}
}
$ fd - > close ( ) ;
}
my $ confsrc = "$tmpdir/qemu-server.conf" ;
my $ srcfd = new IO:: File ( $ confsrc , "r" ) ||
die "unable to open file '$confsrc'\n" ;
my $ outfd = new IO:: File ( $ tmpfn , "w" ) ||
die "unable to write config for VM $vmid\n" ;
2012-12-12 18:35:26 +04:00
my $ cookie = { netcount = > 0 } ;
2011-10-17 15:49:48 +04:00
while ( defined ( my $ line = <$srcfd> ) ) {
2013-07-15 11:13:31 +04:00
restore_update_config_line ( $ outfd , $ cookie , $ vmid , $ map , $ line , $ opts - > { unique } ) ;
2011-10-17 15:49:48 +04:00
}
$ srcfd - > close ( ) ;
$ outfd - > close ( ) ;
} ;
2019-11-08 17:43:14 +03:00
if ( my $ err = $@ ) {
2011-10-17 15:49:48 +04:00
unlink $ tmpfn ;
2013-01-04 09:57:11 +04:00
tar_restore_cleanup ( $ storecfg , "$tmpdir/qmrestore.stat" ) if ! $ opts - > { info } ;
2011-10-17 15:49:48 +04:00
die $ err ;
2012-01-27 12:35:26 +04:00
}
2011-10-17 15:49:48 +04:00
rmtree $ tmpdir ;
rename ( $ tmpfn , $ conffile ) ||
die "unable to commit configuration file '$conffile'\n" ;
2012-12-12 18:35:26 +04:00
2013-01-04 09:57:11 +04:00
PVE::Cluster:: cfs_update ( ) ; # make sure we read new file
2012-12-12 18:35:26 +04:00
eval { rescan ( $ vmid , 1 ) ; } ;
warn $@ if $@ ;
2011-10-17 15:49:48 +04:00
} ;
2017-05-15 15:11:58 +03:00
sub foreach_storage_used_by_vm {
2012-09-12 13:59:48 +04:00
my ( $ conf , $ func ) = @ _ ;
my $ sidhash = { } ;
2017-05-15 15:12:00 +03:00
foreach_drive ( $ conf , sub {
my ( $ ds , $ drive ) = @ _ ;
return if drive_is_cdrom ( $ drive ) ;
2012-09-12 13:59:48 +04:00
my $ volid = $ drive - > { file } ;
my ( $ sid , $ volname ) = PVE::Storage:: parse_volume_id ( $ volid , 1 ) ;
2013-07-15 11:13:31 +04:00
$ sidhash - > { $ sid } = $ sid if $ sid ;
2017-05-15 15:12:00 +03:00
} ) ;
2012-09-12 13:59:48 +04:00
foreach my $ sid ( sort keys %$ sidhash ) {
& $ func ( $ sid ) ;
}
}
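# Usage sketch: collect every storage ID referenced by a VM's drives
# (illustrative only):
#
#   my @sids;
#   foreach_storage_used_by_vm($conf, sub { push @sids, $_[0]; });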
2019-11-20 21:06:15 +03:00
my $ qemu_snap_storage = {
rbd = > 1 ,
} ;
2015-05-06 10:57:34 +03:00
sub do_snapshots_with_qemu {
my ( $ storecfg , $ volid ) = @ _ ;
my $ storage_name = PVE::Storage:: parse_volume_id ( $ volid ) ;
2019-06-04 18:40:42 +03:00
my $ scfg = $ storecfg - > { ids } - > { $ storage_name } ;
2015-05-06 10:57:34 +03:00
2019-06-04 18:40:42 +03:00
if ( $ qemu_snap_storage - > { $ scfg - > { type } } && ! $ scfg - > { krbd } ) {
2015-05-06 10:57:34 +03:00
return 1 ;
}
if ( $ volid =~ m/\.(qcow2|qed)$/ ) {
return 1 ;
}
return undef ;
}
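# Decision sketch (volid hypothetical): qemu itself handles snapshots for
# qcow2/qed files and for RBD volumes without krbd; everything else returns
# undef, i.e. the storage layer takes the snapshot.
#
#   do_snapshots_with_qemu($storecfg, 'local:105/vm-105-disk-0.qcow2'); # -> 1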
2016-02-15 13:45:56 +03:00
sub qga_check_running {
2018-05-30 09:20:25 +03:00
my ( $ vmid , $ nowarn ) = @ _ ;
2016-02-15 13:45:56 +03:00
2019-11-19 14:23:47 +03:00
eval { mon_cmd ( $ vmid , "guest-ping" , timeout = > 3 ) ; } ;
2016-02-15 13:45:56 +03:00
if ( $@ ) {
2018-05-30 09:20:25 +03:00
warn "Qemu Guest Agent is not running - $@" if ! $ nowarn ;
2016-02-15 13:45:56 +03:00
return 0 ;
}
return 1 ;
}
2013-02-14 14:58:49 +04:00
sub template_create {
my ( $ vmid , $ conf , $ disk ) = @ _ ;
my $ storecfg = PVE::Storage:: config ( ) ;
2013-02-15 11:44:12 +04:00
foreach_drive ( $ conf , sub {
my ( $ ds , $ drive ) = @ _ ;
return if drive_is_cdrom ( $ drive ) ;
return if $ disk && $ ds ne $ disk ;
my $ volid = $ drive - > { file } ;
2013-04-18 19:05:31 +04:00
return if ! PVE::Storage:: volume_has_feature ( $ storecfg , 'template' , $ volid ) ;
2013-02-15 11:44:12 +04:00
2013-02-14 14:58:49 +04:00
my $ voliddst = PVE::Storage:: vdisk_create_base ( $ storecfg , $ volid ) ;
$ drive - > { file } = $ voliddst ;
2019-12-05 18:11:01 +03:00
$ conf - > { $ ds } = print_drive ( $ drive ) ;
2016-03-07 14:41:12 +03:00
PVE::QemuConfig - > write_config ( $ vmid , $ conf ) ;
2013-02-14 14:58:49 +04:00
} ) ;
}
2019-03-07 15:43:11 +03:00
sub convert_iscsi_path {
my ( $ path ) = @ _ ;
if ( $ path =~ m | ^ iscsi: // ( [ ^ /]+)/ ( [ ^ /]+)/ ( . + ) $| ) {
my $ portal = $ 1 ;
my $ target = $ 2 ;
my $ lun = $ 3 ;
my $ initiator_name = get_initiator_name ( ) ;
return "file.driver=iscsi,file.transport=tcp,file.initiator-name=$initiator_name," .
"file.portal=$portal,file.target=$target,file.lun=$lun,driver=raw" ;
}
die "cannot convert iscsi path '$path', unkown format\n" ;
}
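# Example conversion (portal/target/lun hypothetical):
#
#   convert_iscsi_path('iscsi://10.0.0.2/iqn.2019-03.test:target0/1')
#
# yields an --image-opts string of the form
#   file.driver=iscsi,file.transport=tcp,file.initiator-name=<name>,
#   file.portal=10.0.0.2,file.target=iqn.2019-03.test:target0,file.lun=1,driver=raw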
2013-04-29 10:41:01 +04:00
sub qemu_img_convert {
2016-03-18 14:20:33 +03:00
my ( $ src_volid , $ dst_volid , $ size , $ snapname , $ is_zero_initialized ) = @ _ ;
2013-04-29 10:41:01 +04:00
my $ storecfg = PVE::Storage:: config ( ) ;
my ( $ src_storeid , $ src_volname ) = PVE::Storage:: parse_volume_id ( $ src_volid , 1 ) ;
my ( $ dst_storeid , $ dst_volname ) = PVE::Storage:: parse_volume_id ( $ dst_volid , 1 ) ;
2019-10-17 14:32:34 +03:00
die "destination '$dst_volid' is not a valid volid form qemu-img convert\n" if ! $ dst_storeid ;
2015-11-12 13:16:50 +03:00
2019-10-17 14:32:34 +03:00
my $ cachemode ;
my $ src_path ;
my $ src_is_iscsi = 0 ;
2019-12-09 12:31:33 +03:00
my $ src_format ;
2015-11-12 13:16:50 +03:00
2019-10-17 14:32:34 +03:00
if ( $ src_storeid ) {
PVE::Storage:: activate_volumes ( $ storecfg , [ $ src_volid ] , $ snapname ) ;
2013-04-29 10:41:01 +04:00
my $ src_scfg = PVE::Storage:: storage_config ( $ storecfg , $ src_storeid ) ;
2019-10-17 14:32:34 +03:00
$ src_format = qemu_img_format ( $ src_scfg , $ src_volname ) ;
$ src_path = PVE::Storage:: path ( $ storecfg , $ src_volid , $ snapname ) ;
$ src_is_iscsi = ( $ src_path =~ m | ^ iscsi: // | ) ;
$ cachemode = 'none' if $ src_scfg - > { type } eq 'zfspool' ;
} elsif ( - f $ src_volid ) {
$ src_path = $ src_volid ;
2020-03-02 13:33:44 +03:00
if ( $ src_path =~ m/\.($PVE::QemuServer::Drive::QEMU_FORMAT_RE)$/ ) {
2019-10-17 14:32:34 +03:00
$ src_format = $ 1 ;
}
}
2013-04-29 10:41:01 +04:00
2019-10-17 14:32:34 +03:00
die "source '$src_volid' is not a valid volid nor path for qemu-img convert\n" if ! $ src_path ;
2013-04-29 10:41:01 +04:00
2019-10-17 14:32:34 +03:00
my $ dst_scfg = PVE::Storage:: storage_config ( $ storecfg , $ dst_storeid ) ;
my $ dst_format = qemu_img_format ( $ dst_scfg , $ dst_volname ) ;
my $ dst_path = PVE::Storage:: path ( $ storecfg , $ dst_volid ) ;
my $ dst_is_iscsi = ( $ dst_path =~ m | ^ iscsi: // | ) ;
2013-04-29 10:41:01 +04:00
2019-10-17 14:32:34 +03:00
my $ cmd = [] ;
push @$ cmd , '/usr/bin/qemu-img' , 'convert' , '-p' , '-n' ;
2019-12-09 12:31:33 +03:00
push @$ cmd , '-l' , "snapshot.name=$snapname"
if $ snapname && $ src_format && $ src_format eq "qcow2" ;
2019-10-17 14:32:34 +03:00
push @$ cmd , '-t' , 'none' if $ dst_scfg - > { type } eq 'zfspool' ;
push @$ cmd , '-T' , $ cachemode if defined ( $ cachemode ) ;
if ( $ src_is_iscsi ) {
push @$ cmd , '--image-opts' ;
$ src_path = convert_iscsi_path ( $ src_path ) ;
2019-12-09 12:31:33 +03:00
} elsif ( $ src_format ) {
2019-10-17 14:32:34 +03:00
push @$ cmd , '-f' , $ src_format ;
}
2019-03-07 15:43:11 +03:00
2019-10-17 14:32:34 +03:00
if ( $ dst_is_iscsi ) {
push @$ cmd , '--target-image-opts' ;
$ dst_path = convert_iscsi_path ( $ dst_path ) ;
} else {
push @$ cmd , '-O' , $ dst_format ;
}
2019-03-07 15:43:11 +03:00
2019-10-17 14:32:34 +03:00
push @$ cmd , $ src_path ;
2019-03-07 15:43:11 +03:00
2019-10-17 14:32:34 +03:00
if ( ! $ dst_is_iscsi && $ is_zero_initialized ) {
push @$ cmd , "zeroinit:$dst_path" ;
} else {
push @$ cmd , $ dst_path ;
}
2019-03-07 15:43:11 +03:00
2019-10-17 14:32:34 +03:00
my $ parser = sub {
my $ line = shift ;
if ( $ line =~ m/\((\S+)\/100\%\)/ ) {
my $ percent = $ 1 ;
my $ transferred = int ( $ size * $ percent / 100 ) ;
my $ remaining = $ size - $ transferred ;
2019-03-07 15:43:11 +03:00
2019-10-17 14:32:34 +03:00
print "transferred: $transferred bytes remaining: $remaining bytes total: $size bytes progression: $percent %\n" ;
2016-03-18 14:20:33 +03:00
}
2013-04-29 10:41:01 +04:00
2019-10-17 14:32:34 +03:00
} ;
2013-04-29 10:41:01 +04:00
2019-10-17 14:32:34 +03:00
eval { run_command ( $ cmd , timeout = > undef , outfunc = > $ parser ) ; } ;
my $ err = $@ ;
die "copy failed: $err" if $ err ;
2013-04-29 10:41:01 +04:00
}
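# For a qcow2-to-zfspool copy with zero detection the assembled command
# resembles (paths hypothetical):
#
#   /usr/bin/qemu-img convert -p -n -t none -f qcow2 -O raw \
#       /var/lib/vz/images/105/vm-105-disk-0.qcow2 \
#       zeroinit:/dev/zvol/rpool/data/vm-105-disk-0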
sub qemu_img_format {
my ( $ scfg , $ volname ) = @ _ ;
2020-03-02 13:33:44 +03:00
if ( $ scfg - > { path } && $ volname =~ m/\.($PVE::QemuServer::Drive::QEMU_FORMAT_RE)$/ ) {
2013-04-29 10:41:01 +04:00
return $ 1 ;
2013-07-15 11:13:31 +04:00
} else {
2013-04-29 10:41:01 +04:00
return "raw" ;
}
}
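# Examples: on a path-based (directory) storage 'vm-105-disk-0.qcow2'
# reports 'qcow2', while volumes on storages without a path attribute
# (LVM, RBD, zvols, ...) always report 'raw'.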
2013-05-02 20:18:03 +04:00
sub qemu_drive_mirror {
2019-04-01 12:30:59 +03:00
my ( $ vmid , $ drive , $ dst_volid , $ vmiddst , $ is_zero_initialized , $ jobs , $ skipcomplete , $ qga , $ bwlimit ) = @ _ ;
2013-05-02 20:18:03 +04:00
2017-01-03 17:03:13 +03:00
$ jobs = { } if ! $ jobs ;
my $ qemu_target ;
my $ format ;
2017-01-03 17:03:18 +03:00
$ jobs - > { "drive-$drive" } = { } ;
2013-05-29 10:32:10 +04:00
2018-02-16 16:43:36 +03:00
if ( $ dst_volid =~ /^nbd:/ ) {
2018-02-15 15:43:10 +03:00
$ qemu_target = $ dst_volid ;
2017-01-03 17:03:13 +03:00
$ format = "nbd" ;
} else {
my $ storecfg = PVE::Storage:: config ( ) ;
my ( $ dst_storeid , $ dst_volname ) = PVE::Storage:: parse_volume_id ( $ dst_volid ) ;
my $ dst_scfg = PVE::Storage:: storage_config ( $ storecfg , $ dst_storeid ) ;
2013-05-02 20:18:03 +04:00
2017-01-03 17:03:13 +03:00
$ format = qemu_img_format ( $ dst_scfg , $ dst_volname ) ;
2014-11-09 17:13:01 +03:00
2017-01-03 17:03:13 +03:00
my $ dst_path = PVE::Storage:: path ( $ storecfg , $ dst_volid ) ;
2014-11-09 17:13:01 +03:00
2017-01-03 17:03:13 +03:00
$ qemu_target = $ is_zero_initialized ? "zeroinit:$dst_path" : $ dst_path ;
}
2016-03-18 14:20:33 +03:00
my $ opts = { timeout = > 10 , device = > "drive-$drive" , mode = > "existing" , sync = > "full" , target = > $ qemu_target } ;
2014-11-10 10:18:39 +03:00
$ opts - > { format } = $ format if $ format ;
2019-04-01 12:30:59 +03:00
if ( defined ( $ bwlimit ) ) {
2019-04-02 15:33:10 +03:00
$ opts - > { speed } = $ bwlimit * 1024 ;
print "drive mirror is starting for drive-$drive with bandwidth limit: ${bwlimit} KB/s\n" ;
2019-04-01 12:30:59 +03:00
} else {
print "drive mirror is starting for drive-$drive\n" ;
}
2014-11-09 17:13:01 +03:00
2019-03-30 11:36:30 +03:00
# if a job already runs for this device we get an error, catch it for cleanup
2019-11-19 14:23:47 +03:00
eval { mon_cmd ( $ vmid , "drive-mirror" , %$ opts ) ; } ;
2017-01-03 17:03:13 +03:00
if ( my $ err = $@ ) {
eval { PVE::QemuServer:: qemu_blockjobs_cancel ( $ vmid , $ jobs ) } ;
2019-03-30 11:36:30 +03:00
warn "$@\n" if $@ ;
die "mirroring error: $err\n" ;
2017-01-03 17:03:13 +03:00
}
2017-01-03 17:03:19 +03:00
qemu_drive_mirror_monitor ( $ vmid , $ vmiddst , $ jobs , $ skipcomplete , $ qga ) ;
2017-01-03 17:03:13 +03:00
}
sub qemu_drive_mirror_monitor {
2017-01-03 17:03:19 +03:00
my ( $ vmid , $ vmiddst , $ jobs , $ skipcomplete , $ qga ) = @ _ ;
2016-04-11 16:19:46 +03:00
2014-11-10 09:55:09 +03:00
eval {
2017-01-03 17:03:13 +03:00
my $ err_complete = 0 ;
2014-11-10 09:55:09 +03:00
while ( 1 ) {
2017-01-03 17:03:13 +03:00
die "storage migration timed out\n" if $ err_complete > 300 ;
2019-11-19 14:23:47 +03:00
my $ stats = mon_cmd ( $ vmid , "query-block-jobs" ) ;
2014-11-10 09:55:09 +03:00
2017-01-03 17:03:13 +03:00
my $ running_mirror_jobs = { } ;
foreach my $ stat ( @$ stats ) {
next if $ stat - > { type } ne 'mirror' ;
$ running_mirror_jobs - > { $ stat - > { device } } = $ stat ;
}
2014-11-10 09:55:09 +03:00
2017-01-03 17:03:13 +03:00
my $ readycounter = 0 ;
2014-12-04 15:07:59 +03:00
2017-01-03 17:03:13 +03:00
foreach my $ job ( keys %$ jobs ) {
if ( defined ( $ jobs - > { $ job } - > { complete } ) && ! defined ( $ running_mirror_jobs - > { $ job } ) ) {
print "$job : finished\n" ;
delete $ jobs - > { $ job } ;
next ;
}
2017-01-05 11:54:07 +03:00
die "$job: mirroring has been cancelled\n" if ! defined ( $ running_mirror_jobs - > { $ job } ) ;
2014-11-21 14:31:56 +03:00
2017-01-03 17:03:13 +03:00
my $ busy = $ running_mirror_jobs - > { $ job } - > { busy } ;
my $ ready = $ running_mirror_jobs - > { $ job } - > { ready } ;
if ( my $ total = $ running_mirror_jobs - > { $ job } - > { len } ) {
my $ transferred = $ running_mirror_jobs - > { $ job } - > { offset } || 0 ;
my $ remaining = $ total - $ transferred ;
my $ percent = sprintf "%.2f" , ( $ transferred * 100 / $ total ) ;
2014-11-10 09:55:09 +03:00
2017-01-03 17:03:13 +03:00
print "$job: transferred: $transferred bytes remaining: $remaining bytes total: $total bytes progression: $percent % busy: $busy ready: $ready \n" ;
}
2014-11-21 14:31:56 +03:00
2017-04-03 15:08:19 +03:00
$ readycounter + + if $ running_mirror_jobs - > { $ job } - > { ready } ;
2017-01-03 17:03:13 +03:00
}
2014-11-10 08:31:08 +03:00
2017-01-03 17:03:13 +03:00
last if scalar ( keys %$ jobs ) == 0 ;
if ( $ readycounter == scalar ( keys %$ jobs ) ) {
print "all mirroring jobs are ready \n" ;
last if $ skipcomplete ; #do the complete later
if ( $ vmiddst && $ vmiddst != $ vmid ) {
2017-11-10 11:47:43 +03:00
my $ agent_running = $ qga && qga_check_running ( $ vmid ) ;
if ( $ agent_running ) {
2017-01-03 17:03:19 +03:00
print "freeze filesystem\n" ;
2019-11-19 14:23:47 +03:00
eval { mon_cmd ( $ vmid , "guest-fsfreeze-freeze" ) ; } ;
2017-01-03 17:03:19 +03:00
} else {
print "suspend vm\n" ;
eval { PVE::QemuServer:: vm_suspend ( $ vmid , 1 ) ; } ;
}
2017-01-03 17:03:13 +03:00
# if we clone a disk for a new target vm, we don't switch the disk
PVE::QemuServer:: qemu_blockjobs_cancel ( $ vmid , $ jobs ) ;
2017-01-03 17:03:19 +03:00
2017-11-10 11:47:43 +03:00
if ( $ agent_running ) {
2017-01-03 17:03:19 +03:00
print "unfreeze filesystem\n" ;
2019-11-19 14:23:47 +03:00
eval { mon_cmd ( $ vmid , "guest-fsfreeze-thaw" ) ; } ;
2017-01-03 17:03:19 +03:00
} else {
print "resume vm\n" ;
eval { PVE::QemuServer:: vm_resume ( $ vmid , 1 , 1 ) ; } ;
}
2016-04-11 16:19:46 +03:00
last ;
2017-01-03 17:03:13 +03:00
} else {
foreach my $ job ( keys %$ jobs ) {
# try to switch the disk if source and destination are on the same guest
2017-01-05 11:54:07 +03:00
print "$job: Completing block job...\n" ;
2017-01-03 17:03:13 +03:00
2019-11-19 14:23:47 +03:00
eval { mon_cmd ( $ vmid , "block-job-complete" , device = > $ job ) } ;
2017-01-03 17:03:13 +03:00
if ( $@ =~ m/cannot be completed/ ) {
2017-01-05 11:54:07 +03:00
print "$job: Block job cannot be completed, try again.\n" ;
2017-01-03 17:03:13 +03:00
$ err_complete + + ;
} else {
2017-01-05 11:54:07 +03:00
print "$job: Completed successfully.\n" ;
2017-01-03 17:03:13 +03:00
$ jobs - > { $ job } - > { complete } = 1 ;
}
}
2016-04-11 16:19:46 +03:00
}
2014-11-10 09:55:09 +03:00
}
sleep 1 ;
2013-05-02 20:18:03 +04:00
}
2014-11-10 09:55:09 +03:00
} ;
2014-11-10 10:18:39 +03:00
my $ err = $@ ;
2014-11-10 09:55:09 +03:00
2014-11-10 10:18:39 +03:00
if ( $ err ) {
2017-01-03 17:03:13 +03:00
eval { PVE::QemuServer:: qemu_blockjobs_cancel ( $ vmid , $ jobs ) } ;
2014-11-10 10:18:39 +03:00
die "mirroring error: $err" ;
}
2017-01-03 17:03:13 +03:00
}
sub qemu_blockjobs_cancel {
my ( $ vmid , $ jobs ) = @ _ ;
foreach my $ job ( keys %$ jobs ) {
2017-01-05 11:54:07 +03:00
print "$job: Cancelling block job\n" ;
2019-11-19 14:23:47 +03:00
eval { mon_cmd ( $ vmid , "block-job-cancel" , device = > $ job ) ; } ;
2017-01-03 17:03:13 +03:00
$ jobs - > { $ job } - > { cancel } = 1 ;
}
while ( 1 ) {
2019-11-19 14:23:47 +03:00
my $ stats = mon_cmd ( $ vmid , "query-block-jobs" ) ;
2017-01-03 17:03:13 +03:00
my $ running_jobs = { } ;
foreach my $ stat ( @$ stats ) {
$ running_jobs - > { $ stat - > { device } } = $ stat ;
}
foreach my $ job ( keys %$ jobs ) {
2017-01-05 11:54:07 +03:00
if ( defined ( $ jobs - > { $ job } - > { cancel } ) && ! defined ( $ running_jobs - > { $ job } ) ) {
print "$job: Done.\n" ;
2017-01-03 17:03:13 +03:00
delete $ jobs - > { $ job } ;
}
}
last if scalar ( keys %$ jobs ) == 0 ;
sleep 1 ;
2013-05-02 20:18:03 +04:00
}
}

sub clone_disk {
    my ($storecfg, $vmid, $running, $drivename, $drive, $snapname,
	$newvmid, $storage, $format, $full, $newvollist, $jobs, $skipcomplete, $qga, $bwlimit) = @_;

    my $newvolid;

    if (!$full) {
	print "create linked clone of drive $drivename ($drive->{file})\n";
	$newvolid = PVE::Storage::vdisk_clone($storecfg, $drive->{file}, $newvmid, $snapname);
	push @$newvollist, $newvolid;
    } else {
	my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
	$storeid = $storage if $storage;

	my $dst_format = resolve_dst_disk_format($storecfg, $storeid, $volname, $format);
	my ($size) = PVE::Storage::volume_size_info($storecfg, $drive->{file}, 3);

	print "create full clone of drive $drivename ($drive->{file})\n";
	my $name = undef;
	if (drive_is_cloudinit($drive)) {
	    $name = "vm-$newvmid-cloudinit";
	    $name .= ".$dst_format" if $dst_format ne 'raw';
	    $snapname = undef;
	    $size = PVE::QemuServer::Cloudinit::CLOUDINIT_DISK_SIZE;
	}
	$newvolid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $newvmid, $dst_format, $name, ($size/1024));
	push @$newvollist, $newvolid;

	PVE::Storage::activate_volumes($storecfg, [$newvolid]);

	if (drive_is_cloudinit($drive)) {
	    goto no_data_clone;
	}

	my $sparseinit = PVE::Storage::volume_has_feature($storecfg, 'sparseinit', $newvolid);
	if (!$running || $snapname) {
	    # TODO: handle bwlimits
	    qemu_img_convert($drive->{file}, $newvolid, $size, $snapname, $sparseinit);
	} else {
	    my $kvmver = get_running_qemu_version($vmid);
	    if (!min_version($kvmver, 2, 7)) {
		die "drive-mirror with iothread requires qemu version 2.7 or higher\n"
		    if $drive->{iothread};
	    }

	    qemu_drive_mirror($vmid, $drivename, $newvolid, $newvmid, $sparseinit, $jobs,
		$skipcomplete, $qga, $bwlimit);
	}
    }

no_data_clone:
    my ($size) = PVE::Storage::volume_size_info($storecfg, $newvolid, 3);

    my $disk = $drive;
    $disk->{format} = undef;
    $disk->{file} = $newvolid;
    $disk->{size} = $size;

    return $disk;
}
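
# Usage sketch (illustrative only; the drive name and flag values are
# assumptions, not from a real call site): full-clone drive 'scsi0' of a
# stopped VM into $newvmid, keeping the source storage and format.
#
#   my $newvollist = [];
#   my $clone = clone_disk($storecfg, $vmid, 0, 'scsi0', $drive, undef,
#       $newvmid, undef, undef, 1, $newvollist, {}, 0, undef, undef);
#   print "cloned $drive->{file} to $clone->{file} ($clone->{size} bytes)\n";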

sub get_running_qemu_version {
    my ($vmid) = @_;
    my $res = mon_cmd($vmid, "query-version");
    return "$res->{qemu}->{major}.$res->{qemu}->{minor}";
}
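
# The returned string only carries major.minor, which is enough for feature
# gates via min_version(), mirroring the check in clone_disk() above:
#
#   my $kvmver = get_running_qemu_version($vmid);   # e.g. "2.7"
#   die "need QEMU >= 2.7\n" if !min_version($kvmver, 2, 7);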

sub qemu_use_old_bios_files {
    my ($machine_type) = @_;

    return if !$machine_type;

    my $use_old_bios_files = undef;
    if ($machine_type =~ m/^(\S+)\.pxe$/) {
	$machine_type = $1;
	$use_old_bios_files = 1;
    } else {
	my $version = PVE::QemuServer::Machine::extract_version($machine_type, kvm_user_version());
	# Note: kvm versions < 2.4 use non-EFI PXE files, and have problems when we
	# load new EFI bios files on migration. So this hack is required to allow
	# live migration from qemu-2.2 to qemu-2.4, which is sometimes used when
	# upgrading from proxmox-ve-3.X to proxmox-ve 4.0
	$use_old_bios_files = !min_version($version, 2, 4);
    }

    return ($use_old_bios_files, $machine_type);
}
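
# Example (illustrative): callers should destructure both return values,
# since the ".pxe" suffix is stripped from the machine type on the way out.
#
#   my ($use_old_bios, $machine) = qemu_use_old_bios_files('pc-i440fx-2.2.pxe');
#   # $use_old_bios is true, $machine is 'pc-i440fx-2.2'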

sub create_efidisk($$$$$) {
    my ($storecfg, $storeid, $vmid, $fmt, $arch) = @_;

    my (undef, $ovmf_vars) = get_ovmf_files($arch);
    die "EFI vars default image not found\n" if ! -f $ovmf_vars;

    my $vars_size_b = -s $ovmf_vars;
    my $vars_size = PVE::Tools::convert_size($vars_size_b, 'b' => 'kb');
    my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, undef, $vars_size);
    PVE::Storage::activate_volumes($storecfg, [$volid]);

    qemu_img_convert($ovmf_vars, $volid, $vars_size_b, undef, 0);
    my ($size) = PVE::Storage::volume_size_info($storecfg, $volid, 3);

    return ($volid, $size/1024);
}
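
# Usage sketch (illustrative; the storage id 'local-lvm' is an assumption):
# allocate an EFI vars disk seeded from the OVMF vars template.
#
#   my ($volid, $size_kb) = create_efidisk($storecfg, 'local-lvm', $vmid, 'raw', 'x86_64');
#   print "allocated EFI vars volume $volid ($size_kb KiB)\n";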

sub vm_iothreads_list {
    my ($vmid) = @_;

    my $res = mon_cmd($vmid, 'query-iothreads');

    my $iothreads = {};
    foreach my $iothread (@$res) {
	$iothreads->{$iothread->{id}} = $iothread->{"thread-id"};
    }

    return $iothreads;
}
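
# The result maps iothread object ids to host thread ids, e.g. (illustrative
# values only): { 'iothread-virtioscsi0' => 12345 }.
#
#   my $iothreads = vm_iothreads_list($vmid);
#   print "$_ => $iothreads->{$_}\n" for sort keys %$iothreads;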

sub scsihw_infos {
    my ($conf, $drive) = @_;

    my $maxdev = 0;

    if (!$conf->{scsihw} || ($conf->{scsihw} =~ m/^lsi/)) {
	$maxdev = 7;
    } elsif ($conf->{scsihw} && ($conf->{scsihw} eq 'virtio-scsi-single')) {
	$maxdev = 1;
    } else {
	$maxdev = 256;
    }

    my $controller = int($drive->{index} / $maxdev);
    my $controller_prefix = ($conf->{scsihw} && $conf->{scsihw} eq 'virtio-scsi-single')
	? "virtioscsi"
	: "scsihw";

    return ($maxdev, $controller, $controller_prefix);
}
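
# Worked example (follows directly from the arithmetic above): with the
# default lsi controller $maxdev is 7, so scsi9 ($drive->{index} == 9) lands
# on controller int(9/7) == 1, i.e. bus "scsihw1".
#
#   my ($maxdev, $controller, $prefix) = scsihw_infos($conf, $drive);
#   my $bus = "$prefix$controller";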

sub windows_version {
    my ($ostype) = @_;

    return 0 if !$ostype;

    my $winversion = 0;
    if ($ostype eq 'wxp' || $ostype eq 'w2k3' || $ostype eq 'w2k') {
	$winversion = 5;
    } elsif ($ostype eq 'w2k8' || $ostype eq 'wvista') {
	$winversion = 6;
    } elsif ($ostype =~ m/^win(\d+)$/) {
	$winversion = $1;
    }

    return $winversion;
}
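
# Example values (these follow directly from the branches above):
#
#   windows_version('w2k3');   # 5  (NT 5.x family)
#   windows_version('win10');  # 10 (digits parsed from the ostype)
#   windows_version('l26');    # 0  (not Windows)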

sub resolve_dst_disk_format {
    my ($storecfg, $storeid, $src_volname, $format) = @_;
    my ($defFormat, $validFormats) = PVE::Storage::storage_default_format($storecfg, $storeid);

    if (!$format) {
	# if no target format is specified, use the source disk format as a hint
	if ($src_volname) {
	    my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
	    $format = qemu_img_format($scfg, $src_volname);
	} else {
	    return $defFormat;
	}
    }

    # test if the requested format is supported - else use the default
    my $supported = grep { $_ eq $format } @$validFormats;
    $format = $defFormat if !$supported;

    return $format;
}
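
# Usage sketch (illustrative; the storage id and volume name are assumptions):
#
#   my $fmt = resolve_dst_disk_format($storecfg, 'local', 'vm-100-disk-0.qcow2', undef);
#   # falls back to the storage's default format if 'qcow2' is not supported there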

# NOTE: if this logic changes, please update docs & possibly gui logic
sub find_vmstate_storage {
    my ($conf, $storecfg) = @_;

    # first, return storage from conf if set
    return $conf->{vmstatestorage} if $conf->{vmstatestorage};

    my ($target, $shared, $local);

    foreach_storage_used_by_vm($conf, sub {
	my ($sid) = @_;
	my $scfg = PVE::Storage::storage_config($storecfg, $sid);
	my $dst = $scfg->{shared} ? \$shared : \$local;
	$$dst = $sid if !$$dst || $scfg->{path}; # prefer file based storage
    });

    # second, use shared storage where VM has at least one disk
    # third, use local storage where VM has at least one disk
    # fall back to local storage
    $target = $shared // $local // 'local';

    return $target;
}
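
# Selection order recap (mirrors the comments above): explicit vmstatestorage
# wins, then a shared storage holding a VM disk, then a local one, then 'local'.
#
#   my $sid = find_vmstate_storage($conf, $storecfg);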

sub generate_uuid {
    my ($uuid, $uuid_str);
    UUID::generate($uuid);
    UUID::unparse($uuid, $uuid_str);
    return $uuid_str;
}

sub generate_smbios1_uuid {
    return "uuid=" . generate_uuid();
}
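
# Example output (illustrative): generate_smbios1_uuid() yields a value ready
# for the smbios1 setting, e.g.
#
#   my $smbios1 = generate_smbios1_uuid();   # "uuid=9f4f70c5-..." (random UUID)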

sub nbd_stop {
    my ($vmid) = @_;

    mon_cmd($vmid, 'nbd-server-stop');
}

sub create_reboot_request {
    my ($vmid) = @_;
    open(my $fh, '>', "/run/qemu-server/$vmid.reboot")
	or die "failed to create reboot trigger file: $!\n";
    close($fh);
}

sub clear_reboot_request {
    my ($vmid) = @_;
    my $path = "/run/qemu-server/$vmid.reboot";
    my $res = 0;

    $res = unlink($path);
    die "could not remove reboot request for $vmid: $!"
	if !$res && $! != POSIX::ENOENT;

    return $res;
}
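
# Illustrative flow (assumed caller context, e.g. a reboot API path): the
# trigger file marks the VM for a restart once a clean shutdown completes.
#
#   create_reboot_request($vmid);
#   # ... request guest shutdown; the stop/cleanup path later checks:
#   my $reboot_requested = clear_reboot_request($vmid);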

# bash completion helper

sub complete_backup_archives {
    my ($cmdname, $pname, $cvalue) = @_;

    my $cfg = PVE::Storage::config();

    my $storeid;

    if ($cvalue =~ m/^([^:]+):/) {
	$storeid = $1;
    }

    my $data = PVE::Storage::template_list($cfg, $storeid, 'backup');

    my $res = [];
    foreach my $id (keys %$data) {
	foreach my $item (@{$data->{$id}}) {
	    next if $item->{format} !~ m/^vma\.(gz|lzo)$/;
	    push @$res, $item->{volid} if defined($item->{volid});
	}
    }

    return $res;
}

my $complete_vmid_full = sub {
    my ($running) = @_;

    my $idlist = vmstatus();

    my $res = [];

    foreach my $id (keys %$idlist) {
	my $d = $idlist->{$id};
	if (defined($running)) {
	    next if $d->{template};
	    next if $running && $d->{status} ne 'running';
	    next if !$running && $d->{status} eq 'running';
	}
	push @$res, $id;
    }
    return $res;
};

sub complete_vmid {
    return &$complete_vmid_full();
}

sub complete_vmid_stopped {
    return &$complete_vmid_full(0);
}

sub complete_vmid_running {
    return &$complete_vmid_full(1);
}

sub complete_storage {

    my $cfg = PVE::Storage::config();
    my $ids = $cfg->{ids};

    my $res = [];
    foreach my $sid (keys %$ids) {
	next if !PVE::Storage::storage_check_enabled($cfg, $sid, undef, 1);
	next if !$ids->{$sid}->{content}->{images};
	push @$res, $sid;
    }

    return $res;
}

sub complete_migration_storage {
    my ($cmd, $param, $current_value, $all_args) = @_;

    my $targetnode = $all_args->[1];

    my $cfg = PVE::Storage::config();
    my $ids = $cfg->{ids};

    my $res = [];
    foreach my $sid (keys %$ids) {
	next if !PVE::Storage::storage_check_enabled($cfg, $sid, $targetnode, 1);
	next if !$ids->{$sid}->{content}->{images};
	push @$res, $sid;
    }

    return $res;
}
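
# These helpers are meant to be referenced from CLI parameter definitions; a
# hypothetical option spec could wire one in via the 'completion' property:
#
#   completion => \&PVE::QemuServer::complete_migration_storage,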

1;