Compare commits
77 Commits
| SHA1 |
|---|
| b31a318d0c |
| 9ee48450b8 |
| 187b970075 |
| bee526001c |
| 0a26768991 |
| 1185453a06 |
| 6ea437b237 |
| fb64d2c785 |
| 6e944a38e3 |
| b6cf9f1576 |
| 93a9c349b5 |
| 41a2c7ab37 |
| c3afbffc70 |
| 6234ee1f44 |
| 5f27a463a1 |
| 559587bd62 |
| 11daa6364c |
| 1a38ffbce6 |
| edb91999d5 |
| 71d936e7b3 |
| 5ca628de2f |
| aaf792a5be |
| 3b776617ba |
| 4798ac1879 |
| 742fbff57f |
| 4d26644a59 |
| 5a77d2ca78 |
| b90046a85f |
| f32b1f03b8 |
| 4839422a99 |
| e37e8fc7cf |
| af5ddc95c8 |
| 6b5d487836 |
| 372b737202 |
| 24cbb9d87e |
| c0b1a72827 |
| ac53f1f76c |
| d52b1535cd |
| 6f7504f145 |
| 953c9d22f4 |
| e7de384a49 |
| e0c5485825 |
| 86aa9d441e |
| 4968c94488 |
| 04421bd745 |
| 97c2683749 |
| d4dd47fc88 |
| 5886cca256 |
| 8e120220da |
| 0982871734 |
| 5caadba86f |
| 9e75c00ea4 |
| 56142f8c79 |
| 1be86a3678 |
| 21aa5446b1 |
| 0d08e3e868 |
| d817b52449 |
| b5f89880eb |
| 31e4ad5da2 |
| 0a47d5a825 |
| 0390b62e01 |
| 359334501a |
| 440908162e |
| ebe48d7e23 |
| 300d590b85 |
| 4941b27bff |
| 57cbbfa969 |
| d6ee17e74f |
| d040c83a54 |
| e9e1c7de50 |
| cea4b35f49 |
| 22ee5b5c29 |
| 8922a71e26 |
| 3343e297fc |
| 39fe40d8a2 |
| aacfcde0e6 |
| ca2e1752d3 |
@@ -32,21 +32,32 @@ sub get_included_vmids {
}

__PACKAGE__->register_method({
name => 'get_backupinfo',
name => 'index',
path => '',
method => 'GET',
protected => 1,
description => "Stub, waits for future use.",
description => "Index for backup info related endpoints",
parameters => {
additionalProperties => 0,
properties => {},
additionalProperties => 0,
properties => {},
},
returns => {
type => 'string',
description => 'Shows stub message',
type => 'array',
description => 'Directory index.',
items => {
type => "object",
properties => {
subdir => {
type => 'string',
description => 'API sub-directory endpoint',
},
},
},
links => [ { rel => 'child', href => "{subdir}" } ],
},
code => sub {
return "Stub endpoint. There is nothing here yet.";
return [
{ subdir => 'not_backed_up' },
];
}});

__PACKAGE__->register_method({
@@ -131,6 +131,7 @@ __PACKAGE__->register_method ({
{ name => 'replication' },
{ name => 'tasks' },
{ name => 'backup' },
{ name => 'backupinfo' },
{ name => 'ha' },
{ name => 'status' },
{ name => 'nextid' },

@@ -225,6 +226,11 @@ __PACKAGE__->register_method({
type => 'string',
optional => 1,
},
name => {
description => "Name of the resource.",
type => 'string',
optional => 1,
},
node => get_standard_option('pve-node', {
description => "The cluster node name (when type in node,storage,qemu,lxc).",
optional => 1,
@@ -375,6 +375,28 @@ __PACKAGE__->register_method({
status => {
type => 'string', enum => ['running', 'stopped'],
},
type => {
type => 'string',
},
id => {
type => 'string',
},
user => {
type => 'string',
},
exitstatus => {
type => 'string',
optional => 1,
},
upid => {
type => 'string',
},
starttime => {
type => 'number',
},
node => {
type => 'string',
},
},
},
code => sub {
@@ -1,6 +1,6 @@
include ../../defines.mk

SOURCES=vzdump.pm pvesubscription.pm pveceph.pm pveam.pm pvesr.pm pvenode.pm pvesh.pm pve5to6.pm
SOURCES=vzdump.pm pvesubscription.pm pveceph.pm pveam.pm pvesr.pm pvenode.pm pvesh.pm pve5to6.pm pve6to7.pm

all:
@@ -8,16 +8,25 @@ use PVE::API2::Ceph;
use PVE::API2::LXC;
use PVE::API2::Qemu;
use PVE::API2::Certificates;
use PVE::API2::Cluster::Ceph;

use PVE::AccessControl;
use PVE::Ceph::Tools;
use PVE::Cluster;
use PVE::Corosync;
use PVE::INotify;
use PVE::JSONSchema;
use PVE::NodeConfig;
use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::Tools qw(run_command);
use PVE::Storage::Plugin;
use PVE::Tools qw(run_command split_list);
use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::VZDump::Common;
use PVE::LXC;
use PVE::LXC::Config;
use PVE::LXC::Setup;

use Term::ANSIColor;

@@ -35,6 +44,8 @@ my $min_pve_major = 6;
my $min_pve_minor = 4;
my $min_pve_pkgrel = 1;

my $forced_legacy_cgroup = 0;

my $counters = {
pass => 0,
skip => 0,

@@ -169,7 +180,7 @@ sub check_pve_packages {
log_fail("proxmox-ve package is too old, please upgrade to >= $min_pve_ver!");
}

my ($krunning, $kinstalled) = (qr/5\.11/, 'pve-kernel-5.11');
my ($krunning, $kinstalled) = (qr/5\.(?:13|15)/, 'pve-kernel-5.11');
if (!$upgraded) {
($krunning, $kinstalled) = (qr/5\.(?:4|11)/, 'pve-kernel-4.15');
}

@@ -199,7 +210,7 @@ sub check_storage_health {

my $info = PVE::Storage::storage_info($cfg);

foreach my $storeid (keys %$info) {
foreach my $storeid (sort keys %$info) {
my $d = $info->{$storeid};
if ($d->{enabled}) {
if ($d->{type} eq 'sheepdog') {
@@ -297,17 +308,25 @@ sub check_cluster_corosync {
if $conf_nodelist_count != $cfs_nodelist_count;

print "\nChecking nodelist entries..\n";
my $nodelist_pass = 1;
for my $cs_node (sort keys %$conf_nodelist) {
my $entry = $conf_nodelist->{$cs_node};
log_fail("$cs_node: no name entry in corosync.conf.")
if !defined($entry->{name});
log_fail("$cs_node: no nodeid configured in corosync.conf.")
if !defined($entry->{nodeid});
if (!defined($entry->{name})) {
$nodelist_pass = 0;
log_fail("$cs_node: no name entry in corosync.conf.");
}
if (!defined($entry->{nodeid})) {
$nodelist_pass = 0;
log_fail("$cs_node: no nodeid configured in corosync.conf.");
}
my $gotLinks = 0;
for my $link (0..7) {
$gotLinks++ if defined($entry->{"ring${link}_addr"});
}
log_fail("$cs_node: no ringX_addr (0 <= X <= 7) link defined in corosync.conf.") if $gotLinks <= 0;
if ($gotLinks <= 0) {
$nodelist_pass = 0;
log_fail("$cs_node: no ringX_addr (0 <= X <= 7) link defined in corosync.conf.");
}

my $verify_ring_ip = sub {
my $key = shift;

@@ -315,11 +334,11 @@ sub check_cluster_corosync {
my ($resolved_ip, undef) = PVE::Corosync::resolve_hostname_like_corosync($ring, $conf);
if (defined($resolved_ip)) {
if ($resolved_ip ne $ring) {
$nodelist_pass = 0;
log_warn("$cs_node: $key '$ring' resolves to '$resolved_ip'.\n Consider replacing it with the currently resolved IP address.");
} else {
log_pass("$cs_node: $key is configured to use IP address '$ring'");
}
} else {
$nodelist_pass = 0;
log_fail("$cs_node: unable to resolve $key '$ring' to an IP address according to Corosync's resolve strategy - cluster will potentially fail with Corosync 3.x/kronosnet!");
}
}

@@ -328,42 +347,38 @@ sub check_cluster_corosync {
$verify_ring_ip->("ring${link}_addr");
}
}
log_pass("nodelist settings OK") if $nodelist_pass;

print "\nChecking totem settings..\n";
my $totem = $conf->{main}->{totem};
my $totem_pass = 1;

my $transport = $totem->{transport};
if (defined($transport)) {
if ($transport ne 'knet') {
$totem_pass = 0;
log_fail("Corosync transport explicitly set to '$transport' instead of implicit default!");
} else {
log_pass("Corosync transport set to '$transport'.");
}
} else {
log_pass("Corosync transport set to implicit default.");
}

# TODO: are those values still up-to-date?
if ((!defined($totem->{secauth}) || $totem->{secauth} ne 'on') && (!defined($totem->{crypto_cipher}) || $totem->{crypto_cipher} eq 'none')) {
$totem_pass = 0;
log_fail("Corosync authentication/encryption is not explicitly enabled (secauth / crypto_cipher / crypto_hash)!");
} else {
if (defined($totem->{crypto_cipher}) && $totem->{crypto_cipher} eq '3des') {
log_fail("Corosync encryption cipher set to '3des', no longer supported in Corosync 3.x!"); # FIXME: can be removed?
} else {
log_pass("Corosync encryption and authentication enabled.");
}
} elsif (defined($totem->{crypto_cipher}) && $totem->{crypto_cipher} eq '3des') {
$totem_pass = 0;
log_fail("Corosync encryption cipher set to '3des', no longer supported in Corosync 3.x!"); # FIXME: can be removed?
}

log_pass("totem settings OK") if $totem_pass;
print "\n";
log_info("run 'pvecm status' to get detailed cluster status..");

print_header("CHECKING INSTALLED COROSYNC VERSION");
if (defined(my $corosync = $get_pkg->('corosync'))) {
if ($corosync->{OldVersion} =~ m/^2\./) {
log_fail("corosync 2.x installed, cluster-wide upgrade to 3.x needed!");
} elsif ($corosync->{OldVersion} =~ m/^3\./) {
log_pass("corosync 3.x installed.");
} else {
log_fail("unexpected corosync version installed: $corosync->{OldVersion}!");
log_fail("\ncorosync 2.x installed, cluster-wide upgrade to 3.x needed!");
} elsif ($corosync->{OldVersion} !~ m/^3\./) {
log_fail("\nunexpected corosync version installed: $corosync->{OldVersion}!");
}
}
}
@@ -380,9 +395,12 @@ sub check_ceph {

log_info("getting Ceph status/health information..");
my $ceph_status = eval { PVE::API2::Ceph->status({ node => $nodename }); };
my $osd_flags = eval { PVE::API2::Ceph->get_flags({ node => $nodename }); };
my $noout = eval { PVE::API2::Cluster::Ceph->get_flag({ flag => "noout" }); };
if ($@) {
log_fail("failed to get 'noout' flag status - $@");
}

my $noout_wanted = 1;
my $noout = $osd_flags && $osd_flags =~ m/noout/;

if (!$ceph_status || !$ceph_status->{health}) {
log_fail("unable to determine Ceph status!");

@@ -400,19 +418,6 @@ sub check_ceph {
}
}

log_info("getting Ceph OSD flags..");
eval {
if (!$osd_flags) {
log_fail("unable to get Ceph OSD flags!");
} else {
if ($osd_flags =~ m/recovery_deletes/ && $osd_flags =~ m/purged_snapdirs/) {
log_pass("all PGs have been scrubbed at least once while running Ceph Luminous."); # FIXME: remove?
} else {
log_fail("missing 'recovery_deletes' and/or 'purged_snapdirs' flag, scrub of all PGs required before upgrading to Nautilus!");
}
}
};

# TODO: check OSD min-required version, if to low it breaks stuff!

log_info("getting Ceph daemon versions..");

@@ -447,9 +452,7 @@ sub check_ceph {
log_warn("unable to determine overall Ceph daemon versions!");
} elsif (keys %$overall_versions == 1) {
log_pass("single running overall version detected for all Ceph daemon types.");
if ((keys %$overall_versions)[0] =~ /^ceph version 15\./) {
$noout_wanted = 0;
}
$noout_wanted = 1; # off post-upgrade, on pre-upgrade
} else {
log_warn("overall version mismatch detected, check 'ceph versions' output for details!");
}

@@ -462,7 +465,7 @@ sub check_ceph {
log_warn("'noout' flag set, Ceph cluster upgrade seems finished.");
}
} elsif ($noout_wanted) {
log_warn("'noout' flag not set - recommended to prevent rebalancing during upgrades.");
log_warn("'noout' flag not set - recommended to prevent rebalancing during cluster-wide upgrades.");
}

log_info("checking Ceph config..");

@@ -473,8 +476,6 @@ sub check_ceph {
my $global_monhost = $global->{mon_host} // $global->{"mon host"} // $global->{"mon-host"};
if (!defined($global_monhost)) {
log_warn("No 'mon_host' entry found in ceph config.\n It's recommended to add mon_host with all monitor addresses (without ports) to the global section.");
} else {
log_pass("Found 'mon_host' entry.");
}

my $ipv6 = $global->{ms_bind_ipv6} // $global->{"ms bind ipv6"} // $global->{"ms-bind-ipv6"};

@@ -482,17 +483,11 @@ sub check_ceph {
my $ipv4 = $global->{ms_bind_ipv4} // $global->{"ms bind ipv4"} // $global->{"ms-bind-ipv4"};
if ($ipv6 eq 'true' && (!defined($ipv4) || $ipv4 ne 'false')) {
log_warn("'ms_bind_ipv6' is enabled but 'ms_bind_ipv4' is not disabled.\n Make sure to disable 'ms_bind_ipv4' for ipv6 only clusters, or add an ipv4 network to public/cluster network.");
} else {
log_pass("'ms_bind_ipv6' is enabled and 'ms_bind_ipv4' disabled");
}
} else {
log_pass("'ms_bind_ipv6' not enabled");
}

if (defined($global->{keyring})) {
log_warn("[global] config section contains 'keyring' option, which will prevent services from starting with Nautilus.\n Move 'keyring' option to [client] section instead.");
} else {
log_pass("no 'keyring' option in [global] section found.");
}

} else {

@@ -501,18 +496,565 @@ sub check_ceph {

my $local_ceph_ver = PVE::Ceph::Tools::get_local_version(1);
if (defined($local_ceph_ver)) {
if ($local_ceph_ver == 14) {
my $ceph_volume_osds = PVE::Ceph::Tools::ceph_volume_list();
my $scanned_osds = PVE::Tools::dir_glob_regex('/etc/ceph/osd', '^.*\.json$');
if (-e '/var/lib/ceph/osd/' && !defined($scanned_osds) && !(keys %$ceph_volume_osds)) {
log_warn("local Ceph version is Nautilus, local OSDs detected, but no conversion from ceph-disk to ceph-volume done (yet).");
}
if ($local_ceph_ver <= 14) {
log_fail("local Ceph version too low, at least Octopus required..");
}
} else {
log_fail("unable to determine local Ceph version.");
}
}

sub check_backup_retention_settings {
log_info("Checking backup retention settings..");

my $pass = 1;

my $node_has_retention;

my $maxfiles_msg = "parameter 'maxfiles' is deprecated with PVE 7.x and will be removed in a " .
"future version, use 'prune-backups' instead.";

eval {
my $confdesc = PVE::VZDump::Common::get_confdesc();

my $fn = "/etc/vzdump.conf";
my $raw = PVE::Tools::file_get_contents($fn);

my $conf_schema = { type => 'object', properties => $confdesc, };
my $param = PVE::JSONSchema::parse_config($conf_schema, $fn, $raw);

if (defined($param->{maxfiles})) {
$pass = 0;
log_warn("$fn - $maxfiles_msg");
}

$node_has_retention = defined($param->{maxfiles}) || defined($param->{'prune-backups'});
};
if (my $err = $@) {
$pass = 0;
log_warn("unable to parse node's VZDump configuration - $err");
}

my $storage_cfg = PVE::Storage::config();

for my $storeid (keys $storage_cfg->{ids}->%*) {
my $scfg = $storage_cfg->{ids}->{$storeid};

if (defined($scfg->{maxfiles})) {
$pass = 0;
log_warn("storage '$storeid' - $maxfiles_msg");
}

next if !$scfg->{content}->{backup};
next if defined($scfg->{maxfiles}) || defined($scfg->{'prune-backups'});
next if $node_has_retention;

log_info("storage '$storeid' - no backup retention settings defined - by default, PVE " .
"7.x will no longer keep only the last backup, but all backups");
}

eval {
my $vzdump_cron = PVE::Cluster::cfs_read_file('vzdump.cron');

# only warn once, there might be many jobs...
if (scalar(grep { defined($_->{maxfiles}) } $vzdump_cron->{jobs}->@*)) {
$pass = 0;
log_warn("/etc/pve/vzdump.cron - $maxfiles_msg");
}
};
if (my $err = $@) {
$pass = 0;
log_warn("unable to parse node's VZDump configuration - $err");
}

log_pass("no problems found.") if $pass;
}

sub check_cifs_credential_location {
log_info("checking CIFS credential location..");

my $regex = qr/^(.*)\.cred$/;

my $found;

PVE::Tools::dir_glob_foreach('/etc/pve/priv/', $regex, sub {
my ($filename) = @_;

my ($basename) = $filename =~ $regex;

log_warn("CIFS credentials '/etc/pve/priv/$filename' will be moved to " .
"'/etc/pve/priv/storage/$basename.pw' during the update");

$found = 1;
});

log_pass("no CIFS credentials at outdated location found.") if !$found;
}

sub check_custom_pool_roles {
log_info("Checking custom roles for pool permissions..");

if (! -f "/etc/pve/user.cfg") {
log_skip("user.cfg does not exist");
return;
}

my $raw = eval { PVE::Tools::file_get_contents('/etc/pve/user.cfg'); };
if ($@) {
log_fail("Failed to read '/etc/pve/user.cfg' - $@");
return;
}

my $roles = {};
while ($raw =~ /^\s*(.+?)\s*$/gm) {
my $line = $1;
my @data;

foreach my $d (split (/:/, $line)) {
$d =~ s/^\s+//;
$d =~ s/\s+$//;
push @data, $d
}

my $et = shift @data;
next if $et ne 'role';

my ($role, $privlist) = @data;
if (!PVE::AccessControl::verify_rolename($role, 1)) {
warn "user config - ignore role '$role' - invalid characters in role name\n";
next;
}

$roles->{$role} = {} if !$roles->{$role};
foreach my $priv (split_list($privlist)) {
$roles->{$role}->{$priv} = 1;
}
}

foreach my $role (sort keys %{$roles}) {
if (PVE::AccessControl::role_is_special($role)) {
next;
}

if ($role eq "PVEPoolUser") {
# the user created a custom role named PVEPoolUser
log_fail("Custom role '$role' has a restricted name - a built-in role 'PVEPoolUser' will be available with the upgrade");
} else {
log_pass("Custom role '$role' has no restricted name");
}

my $perms = $roles->{$role};
if ($perms->{'Pool.Allocate'} && $perms->{'Pool.Audit'}) {
log_pass("Custom role '$role' contains updated pool permissions");
} elsif ($perms->{'Pool.Allocate'}) {
log_warn("Custom role '$role' contains permission 'Pool.Allocate' - to ensure same behavior add 'Pool.Audit' to this role");
} else {
log_pass("Custom role '$role' contains no permissions that need to be updated");
}
}
}

my sub check_max_length {
my ($raw, $max_length, $warning) = @_;
log_warn($warning) if defined($raw) && length($raw) > $max_length;
}

sub check_node_and_guest_configurations {
log_info("Checking node and guest description/note legnth..");

my @affected_nodes = grep {
my $desc = PVE::NodeConfig::load_config($_)->{desc};
defined($desc) && length($desc) > 64 * 1024
} PVE::Cluster::get_nodelist();

if (scalar(@affected_nodes) > 0) {
log_warn("Node config description of the following nodes too long for new limit of 64 KiB:\n "
. join(', ', @affected_nodes));
} else {
log_pass("All node config descriptions fit in the new limit of 64 KiB");
}

my $affected_guests_long_desc = [];
my $affected_cts_cgroup_keys = [];

my $cts = PVE::LXC::config_list();
for my $vmid (sort { $a <=> $b } keys %$cts) {
my $conf = PVE::LXC::Config->load_config($vmid);

my $desc = $conf->{description};
push @$affected_guests_long_desc, "CT $vmid" if defined($desc) && length($desc) > 8 * 1024;

my $lxc_raw_conf = $conf->{lxc};
push @$affected_cts_cgroup_keys, "CT $vmid" if (grep (@$_[0] =~ /^lxc\.cgroup\./, @$lxc_raw_conf));
}
my $vms = PVE::QemuServer::config_list();
for my $vmid (sort { $a <=> $b } keys %$vms) {
my $desc = PVE::QemuConfig->load_config($vmid)->{description};
push @$affected_guests_long_desc, "VM $vmid" if defined($desc) && length($desc) > 8 * 1024;
}
if (scalar($affected_guests_long_desc->@*) > 0) {
log_warn("Guest config description of the following virtual-guests too long for new limit of 64 KiB:\n"
." " . join(", ", $affected_guests_long_desc->@*));
} else {
log_pass("All guest config descriptions fit in the new limit of 8 KiB");
}

log_info("Checking container configs for deprecated lxc.cgroup entries");

if (scalar($affected_cts_cgroup_keys->@*) > 0) {
if ($forced_legacy_cgroup) {
log_pass("Found legacy 'lxc.cgroup' keys, but system explicitly configured for legacy hybrid cgroup hierarchy.");
} else {
log_warn("The following CTs have 'lxc.cgroup' keys configured, which will be ignored in the new default unified cgroupv2:\n"
." " . join(", ", $affected_cts_cgroup_keys->@*) ."\n"
." Often it can be enough to change to the new 'lxc.cgroup2' prefix after the upgrade to Proxmox VE 7.x");
}
} else {
log_pass("No legacy 'lxc.cgroup' keys found.");
}
}

sub check_storage_content {
log_info("Checking storage content type configuration..");

my $found;
my $pass = 1;

my $storage_cfg = PVE::Storage::config();

for my $storeid (sort keys $storage_cfg->{ids}->%*) {
my $scfg = $storage_cfg->{ids}->{$storeid};

next if $scfg->{shared};
next if !PVE::Storage::storage_check_enabled($storage_cfg, $storeid, undef, 1);

my $valid_content = PVE::Storage::Plugin::valid_content_types($scfg->{type});

if (scalar(keys $scfg->{content}->%*) == 0 && !$valid_content->{none}) {
$pass = 0;
log_fail("storage '$storeid' does not support configured content type 'none'");
delete $scfg->{content}->{none}; # scan for guest images below
}

next if $scfg->{content}->{images};
next if $scfg->{content}->{rootdir};

# Skip 'iscsi(direct)' (and foreign plugins with potentially similiar behavior) with 'none',
# because that means "use LUNs directly" and vdisk_list() in PVE 6.x still lists those.
# It's enough to *not* skip 'dir', because it is the only other storage that supports 'none'
# and 'images' or 'rootdir', hence being potentially misconfigured.
next if $scfg->{type} ne 'dir' && $scfg->{content}->{none};

eval { PVE::Storage::activate_storage($storage_cfg, $storeid) };
if (my $err = $@) {
log_warn("activating '$storeid' failed - $err");
next;
}

my $res = eval { PVE::Storage::vdisk_list($storage_cfg, $storeid); };
if (my $err = $@) {
log_warn("listing images on '$storeid' failed - $err");
next;
}
my @volids = map { $_->{volid} } $res->{$storeid}->@*;

my $number = scalar(@volids);
if ($number > 0) {
log_info("storage '$storeid' - neither content type 'images' nor 'rootdir' configured"
.", but found $number guest volume(s)");
}
}

my $check_volid = sub {
my ($volid, $vmid, $vmtype, $reference) = @_;

my $guesttext = $vmtype eq 'qemu' ? 'VM' : 'CT';
my $prefix = "$guesttext $vmid - volume '$volid' ($reference)";

my ($storeid) = PVE::Storage::parse_volume_id($volid, 1);
return if !defined($storeid);

my $scfg = $storage_cfg->{ids}->{$storeid};
if (!$scfg) {
$pass = 0;
log_warn("$prefix - storage does not exist!");
return;
}

# cannot use parse_volname for containers, as it can return 'images'
# but containers cannot have ISO images attached, so assume 'rootdir'
my $vtype = 'rootdir';
if ($vmtype eq 'qemu') {
($vtype) = eval { PVE::Storage::parse_volname($storage_cfg, $volid); };
return if $@;
}

if (!$scfg->{content}->{$vtype}) {
$found = 1;
$pass = 0;
log_warn("$prefix - storage does not have content type '$vtype' configured.");
}
};

my $cts = PVE::LXC::config_list();
for my $vmid (sort { $a <=> $b } keys %$cts) {
my $conf = PVE::LXC::Config->load_config($vmid);

my $volhash = {};

my $check = sub {
my ($ms, $mountpoint, $reference) = @_;

my $volid = $mountpoint->{volume};
return if !$volid || $mountpoint->{type} ne 'volume';

return if $volhash->{$volid}; # volume might be referenced multiple times

$volhash->{$volid} = 1;

$check_volid->($volid, $vmid, 'lxc', $reference);
};

my $opts = { include_unused => 1 };
PVE::LXC::Config->foreach_volume_full($conf, $opts, $check, 'in config');
for my $snapname (keys $conf->{snapshots}->%*) {
my $snap = $conf->{snapshots}->{$snapname};
PVE::LXC::Config->foreach_volume_full($snap, $opts, $check, "in snapshot '$snapname'");
}
}

my $vms = PVE::QemuServer::config_list();
for my $vmid (sort { $a <=> $b } keys %$vms) {
my $conf = PVE::QemuConfig->load_config($vmid);

my $volhash = {};

my $check = sub {
my ($key, $drive, $reference) = @_;

my $volid = $drive->{file};
return if $volid =~ m|^/|;

return if $volhash->{$volid}; # volume might be referenced multiple times

$volhash->{$volid} = 1;

$check_volid->($volid, $vmid, 'qemu', $reference);
};

my $opts = {
extra_keys => ['vmstate'],
include_unused => 1,
};
# startup from a suspended state works even without 'images' content type on the
# state storage, so do not check 'vmstate' for $conf
PVE::QemuConfig->foreach_volume_full($conf, { include_unused => 1 }, $check, 'in config');
for my $snapname (keys $conf->{snapshots}->%*) {
my $snap = $conf->{snapshots}->{$snapname};
PVE::QemuConfig->foreach_volume_full($snap, $opts, $check, "in snapshot '$snapname'");
}
}

if ($found) {
log_warn("Proxmox VE 7.0 enforces stricter content type checks. The guests above " .
"might not work until the storage configuration is fixed.");
}

if ($pass) {
log_pass("no problems found");
}
}

sub check_containers_cgroup_compat {
if ($forced_legacy_cgroup) {
log_skip("System explicitly configured for legacy hybrid cgroup hierarchy.");
return;
}

my $supports_cgroupv2 = sub {
my ($conf, $rootdir, $ctid) = @_;

my $get_systemd_version = sub {
my ($self) = @_;

my $sd_lib_dir = -d "/lib/systemd" ? "/lib/systemd" : "/usr/lib/systemd";
my $libsd = PVE::Tools::dir_glob_regex($sd_lib_dir, "libsystemd-shared-.+\.so");
if (defined($libsd) && $libsd =~ /libsystemd-shared-(\d+)\.so/) {
return $1;
}

return undef;
};

my $unified_cgroupv2_support = sub {
my ($self) = @_;

# https://www.freedesktop.org/software/systemd/man/systemd.html
# systemd is installed as symlink to /sbin/init
my $systemd = CORE::readlink('/sbin/init');

# assume non-systemd init will run with unified cgroupv2
if (!defined($systemd) || $systemd !~ m@/systemd$@) {
return 1;
}

# systemd version 232 (e.g. debian stretch) supports the unified hierarchy
my $sdver = $get_systemd_version->();
if (!defined($sdver) || $sdver < 232) {
return 0;
}

return 1;
};

my $ostype = $conf->{ostype};
if (!defined($ostype)) {
log_warn("Found CT ($ctid) without 'ostype' set!");
} elsif ($ostype eq 'devuan' || $ostype eq 'alpine') {
return 1; # no systemd, no cgroup problems
}

my $lxc_setup = PVE::LXC::Setup->new($conf, $rootdir);
return $lxc_setup->protected_call($unified_cgroupv2_support);
};

my $log_problem = sub {
my ($ctid) = @_;
log_warn("Found at least one CT ($ctid) which does not support running in a unified cgroup v2" .
" layout.\n Either upgrade the Container distro or set systemd.unified_cgroup_hierarchy=0 " .
"in the Proxmox VE hosts' kernel cmdline! Skipping further CT compat checks."
);
};

my $cts = eval { PVE::API2::LXC->vmlist({ node => $nodename }) };
if ($@) {
log_warn("Failed to retrieve information about this node's CTs - $@");
return;
}

if (!defined($cts) || !scalar(@$cts)) {
log_skip("No containers on node detected.");
return;
}

my @running_cts = sort { $a <=> $b } grep { $_->{status} eq 'running' } @$cts;
my @offline_cts = sort { $a <=> $b } grep { $_->{status} ne 'running' } @$cts;

for my $ct (@running_cts) {
my $ctid = $ct->{vmid};
my $pid = eval { PVE::LXC::find_lxc_pid($ctid) };
if (my $err = $@) {
log_warn("Failed to get PID for running CT $ctid - $err");
next;
}
my $rootdir = "/proc/$pid/root";
my $conf = PVE::LXC::Config->load_config($ctid);

my $ret = eval { $supports_cgroupv2->($conf, $rootdir, $ctid) };
if (my $err = $@) {
log_warn("Failed to get cgroup support status for CT $ctid - $err");
next;
}
if (!$ret) {
$log_problem->($ctid);
return;
}
}

my $storage_cfg = PVE::Storage::config();
for my $ct (@offline_cts) {
my $ctid = $ct->{vmid};
my ($conf, $rootdir, $ret);
eval {
$conf = PVE::LXC::Config->load_config($ctid);
$rootdir = PVE::LXC::mount_all($ctid, $storage_cfg, $conf);
$ret = $supports_cgroupv2->($conf, $rootdir, $ctid);
};
if (my $err = $@) {
log_warn("Failed to load config and mount CT $ctid - $err");
eval { PVE::LXC::umount_all($ctid, $storage_cfg, $conf) };
next;
}
if (!$ret) {
$log_problem->($ctid);
eval { PVE::LXC::umount_all($ctid, $storage_cfg, $conf) };
last;
}

eval { PVE::LXC::umount_all($ctid, $storage_cfg, $conf) };
}
};

sub check_security_repo {
log_info("Checking if the suite for the Debian security repository is correct..");

my $found = 0;

my $dir = '/etc/apt/sources.list.d';
my $in_dir = 0;

my $check_file = sub {
my ($file) = @_;

$file = "${dir}/${file}" if $in_dir;

my $raw = eval { PVE::Tools::file_get_contents($file) };
return if !defined($raw);
my @lines = split(/\n/, $raw);

my $number = 0;
for my $line (@lines) {
$number++;

next if length($line) == 0; # split would result in undef then...

($line) = split(/#/, $line);

next if $line !~ m/^deb[[:space:]]/; # is case sensitive

my $suite;

# catch any of
# https://deb.debian.org/debian-security
# http://security.debian.org/debian-security
# http://security.debian.org/
if ($line =~ m|https?://deb\.debian\.org/debian-security/?\s+(\S*)|i) {
$suite = $1;
} elsif ($line =~ m|https?://security\.debian\.org(?:.*?)\s+(\S*)|i) {
$suite = $1;
} else {
next;
}

$found = 1;

my $where = "in ${file}:${number}";

if ($suite eq 'buster/updates') {
log_info("Make sure to change the suite of the Debian security repository " .
"from 'buster/updates' to 'bullseye-security' - $where");
} elsif ($suite eq 'bullseye-security') {
log_pass("already using 'bullseye-security'");
} else {
log_fail("The new suite of the Debian security repository should be " .
"'bullseye-security' - $where");
}
}
};

$check_file->("/etc/apt/sources.list");

$in_dir = 1;

PVE::Tools::dir_glob_foreach($dir, '^.*\.list$', $check_file);

if (!$found) {
# only warn, it might be defined in a .sources file or in a way not catched above
log_warn("No Debian security repository detected in /etc/apt/sources.list and " .
"/etc/apt/sources.list.d/*.list");
}
}

|
||||
print_header("MISCELLANEOUS CHECKS");
|
||||
my $ssh_config = eval { PVE::Tools::file_get_contents('/root/.ssh/config') };
|
||||
@ -529,8 +1071,8 @@ sub check_misc {
|
||||
$log_systemd_unit_state->('pvestatd.service');
|
||||
|
||||
my $root_free = PVE::Tools::df('/', 10);
|
||||
log_warn("Less than 2G free space on root file system.")
|
||||
if defined($root_free) && $root_free->{avail} < 2*1024*1024*1024;
|
||||
log_warn("Less than 4 GiB free space on root file system.")
|
||||
if defined($root_free) && $root_free->{avail} < 4*1024*1024*1024;
|
||||
|
||||
log_info("Checking for running guests..");
|
||||
my $running_guests = 0;
|
||||
@ -568,7 +1110,6 @@ sub check_misc {
|
||||
}
|
||||
}
|
||||
|
||||
log_info("Check node certificate's RSA key size");
|
||||
my $certs = PVE::API2::Certificates->info({ node => $nodename });
|
||||
my $certs_check = {
|
||||
'rsaEncryption' => {
|
||||
@ -581,27 +1122,42 @@ sub check_misc {
|
||||
},
|
||||
};
|
||||
|
||||
my $log_cert_heading_called;
|
||||
my $log_cert_heading_once = sub {
|
||||
return if $log_cert_heading_called;
|
||||
log_info("Check node certificate's RSA key size");
|
||||
$log_cert_heading_called = 1;
|
||||
};
|
||||
|
||||
my $certs_check_failed = 0;
|
||||
foreach my $cert (@$certs) {
|
||||
my ($type, $size, $fn) = $cert->@{qw(public-key-type public-key-bits filename)};
|
||||
|
||||
if (!defined($type) || !defined($size)) {
|
||||
$log_cert_heading_once->();
|
||||
log_warn("'$fn': cannot check certificate, failed to get it's type or size!");
|
||||
}
|
||||
|
||||
my $check = $certs_check->{$type};
|
||||
if (!defined($check)) {
|
||||
$log_cert_heading_once->();
|
||||
log_warn("'$fn': certificate's public key type '$type' unknown, check Debian Busters release notes");
|
||||
next;
|
||||
}
|
||||
|
||||
if ($size < $check->{minsize}) {
|
||||
$log_cert_heading_once->();
|
||||
log_fail("'$fn', certificate's $check->{name} public key size is less than 2048 bit");
|
||||
$certs_check_failed = 1;
|
||||
} else {
|
||||
log_pass("Certificate '$fn' passed Debian Busters security level for TLS connections ($size >= 2048)");
|
||||
}
|
||||
}
|
||||
|
||||
check_backup_retention_settings();
|
||||
check_cifs_credential_location();
|
||||
check_custom_pool_roles();
|
||||
check_node_and_guest_configurations();
|
||||
check_storage_content();
|
||||
check_security_repo();
|
||||
}
|
||||
|
||||
__PACKAGE__->register_method ({
|
||||
@ -612,18 +1168,35 @@ __PACKAGE__->register_method ({
|
||||
parameters => {
|
||||
additionalProperties => 0,
|
||||
properties => {
|
||||
full => {
|
||||
description => 'perform additional, expensive checks.',
|
||||
type => 'boolean',
|
||||
optional => 1,
|
||||
default => 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
returns => { type => 'null' },
|
||||
code => sub {
|
||||
my ($param) = @_;
|
||||
|
||||
my $kernel_cli = PVE::Tools::file_get_contents('/proc/cmdline');
|
||||
if ($kernel_cli =~ /systemd.unified_cgroup_hierarchy=0/){
|
||||
$forced_legacy_cgroup = 1;
|
||||
}
|
||||
|
||||
check_pve_packages();
|
||||
check_cluster_corosync();
|
||||
check_ceph();
|
||||
check_storage_health();
|
||||
check_misc();
|
||||
|
||||
if ($param->{full}) {
|
||||
check_containers_cgroup_compat();
|
||||
} else {
|
||||
log_skip("NOTE: Expensive checks, like CT cgroupv2 compat, not performed without '--full' parameter");
|
||||
}
|
||||
|
||||
print_header("SUMMARY");
|
||||
|
||||
my $total = 0;
|
||||
@ -646,7 +1219,4 @@ __PACKAGE__->register_method ({
|
||||
|
||||
our $cmddef = [ __PACKAGE__, 'checklist', [], {}];
|
||||
|
||||
# for now drop all unknown params and just check
|
||||
@ARGV = ();
|
||||
|
||||
1;
|
||||
|
@@ -94,7 +94,8 @@ sub update_qemu_status {
}
$object =~ s/\s/\\ /g;

build_influxdb_payload($class, $txn, $data, $ctime, $object);
# VMID is already added in base $object above, so exclude it from being re-added
build_influxdb_payload($class, $txn, $data, $ctime, $object, { 'vmid' => 1 });
}

sub update_lxc_status {

@@ -108,7 +109,8 @@ sub update_lxc_status {
}
$object =~ s/\s/\\ /g;

build_influxdb_payload($class, $txn, $data, $ctime, $object);
# VMID is already added in base $object above, so exclude it from being re-added
build_influxdb_payload($class, $txn, $data, $ctime, $object, { 'vmid' => 1 });
}

sub update_storage_status {

@@ -246,27 +248,31 @@ sub test_connection {
}

sub build_influxdb_payload {
my ($class, $txn, $data, $ctime, $tags, $measurement, $instance) = @_;
my ($class, $txn, $data, $ctime, $tags, $excluded, $measurement, $instance) = @_;

# 'abc' and '123' are both valid hostnames, that confuses influx's type detection
my $to_quote = { name => 1 };

my @values = ();

foreach my $key (sort keys %$data) {
next if defined($excluded) && $excluded->{$key};
my $value = $data->{$key};
next if !defined($value);

if (!ref($value) && $value ne '') {
# value is scalar

if (defined(my $v = prepare_value($value))) {
if (defined(my $v = prepare_value($value, $to_quote->{$key}))) {
push @values, "$key=$v";
}
} elsif (ref($value) eq 'HASH') {
# value is a hash

if (!defined($measurement)) {
build_influxdb_payload($class, $txn, $value, $ctime, $tags, $key);
build_influxdb_payload($class, $txn, $value, $ctime, $tags, $excluded, $key);
} elsif(!defined($instance)) {
build_influxdb_payload($class, $txn, $value, $ctime, $tags, $measurement, $key);
build_influxdb_payload($class, $txn, $value, $ctime, $tags, $excluded, $measurement, $key);
} else {
push @values, get_recursive_values($value);
}

@@ -302,9 +308,10 @@ sub get_recursive_values {
}

sub prepare_value {
my ($value) = @_;
my ($value, $force_quote) = @_;

if (looks_like_number($value)) {
# don't treat value like a number if quote is 1
if (!$force_quote && looks_like_number($value)) {
if (isnan($value) || isinf($value)) {
# we cannot send influxdb NaN or Inf
return undef;
@@ -1096,6 +1096,7 @@ sub exec_backup_task {
'upload-log',
[ $pbs_snapshot_name, $task->{tmplog} ],
errmsg => "uploading backup task log failed",
outfunc => sub {},
);
};
debugmsg('warn', "$@") if $@; # $@ contains already error prefix
@@ -7,7 +7,7 @@ PERL_DOC_INC_DIRS=..
include /usr/share/pve-doc-generator/pve-doc-generator.mk

SERVICES = pvestatd pveproxy pvedaemon spiceproxy
CLITOOLS = vzdump pvesubscription pveceph pveam pvesr pvenode pvesh pve5to6
CLITOOLS = vzdump pvesubscription pveceph pveam pvesr pvenode pvesh pve5to6 pve6to7

SCRIPTS = \
${SERVICES} \

@@ -51,6 +51,9 @@ all: ${SERVICE_MANS} ${CLI_MANS} pvemailforward
pve5to6.1:
echo ".TH pve5to6 1" > $@

pve6to7.1:
echo ".TH pve6to7 1" > $@

pveversion.1.pod: pveversion
pveupgrade.1.pod: pveupgrade
pvereport.1.pod: pvereport
debian/changelog (vendored), 107 changed lines
@@ -1,3 +1,110 @@
pve-manager (6.4-14) buster; urgency=medium

  * pve6to7: ignore deb-src entries for security repo check

  * pve6to7: update expected running kernel version to be either 5.13 or 5.15

  * fix #3815: metrics: influxdb: coerce guest name always to string

  * api: task: better document return schema

  * vzdump: pbs: suppress output from upload-log command

  * daily update timer: start randomized interval already on 01:00 to avoid
    systemd-timer bug w.r.t. DST change

  * fix #3620: ui: dc/storage: delete empty pbs fingerprint on edit

 -- Proxmox Support Team <support@proxmox.com> Mon, 14 Mar 2022 10:48:22 +0100

pve-manager (6.4-13) buster; urgency=medium

  * pve6to7: add check for 'lxc.cgroup.*' keys in container config

  * pve6to7: storage content: ignore unreferenced volumes on misconfigured
    storages, as that should not be an actual problem in praxis, and no
    data-loss would happen by default anyway.

  * pve6to7: add check for Debian security repository switch

  * ui: HA ressources: fix toggling edit button on selection

 -- Proxmox Support Team <support@proxmox.com> Wed, 07 Jul 2021 18:46:39 +0200

pve-manager (6.4-12) buster; urgency=medium

  * pve6to7: use new ceph flags API

  * pve6to7: remove PASS noise for ceph checks that were relevant for luminous
    to nautilus

  * pve6to7: enable noout-warning before upgrade

 -- Proxmox Support Team <support@proxmox.com> Tue, 06 Jul 2021 15:51:01 +0200

pve-manager (6.4-11) pve; urgency=medium

  * pve6to7: check best-effort for (systemd based) containers not supporting
    pure cgroup v2

  * ve6to7: make cert check only report on errors

 -- Proxmox Support Team <support@proxmox.com> Mon, 05 Jul 2021 17:44:45 +0200

pve-manager (6.4-10) pve; urgency=medium

  * pve6to7: more fine-grained detection of misconfigured guest volumes

  * pve6to7: skip user.cfg if it does not exist

  * pve6to7: drop PASS for old Luminous -> Nautilus check

  * api: cluster/backupinfo: rework index endpoint to actually return
    sub-directories (endpoints)

  * api: cluster: add backupinfo to subdirectories

  * api: cluster/resources: add 'name' property to return schema

 -- Proxmox Support Team <support@proxmox.com> Fri, 02 Jul 2021 11:55:49 +0200

pve-manager (6.4-9) pve; urgency=medium

  * ui: TFA: use correct user-id value when creating TOTP QR code

  * pve6to7: add checks for:
    + backup retention options
    + CIFS credentials
    + pool permissions
    + add check for guest and node description length
    + add check for guest images on storages without a fitting content-type set

  * ui: fix Guest Summary Notes height limit

  * fix #3470: ui: qemu/CmdMenu: fix confirm message for 'pause' cmd

  * ui: ceph/Status: fix icon in status grid

  * ui: dc: backup: fix job detail search

 -- Proxmox Support Team <support@proxmox.com> Wed, 23 Jun 2021 20:46:40 +0200

pve-manager (6.4-8) pve; urgency=medium

  * pveceph install: fix fallback for default version

  * ui: Parser: fix bind and dev mounts for lxc

  * add initial pve6to7 helper script

  * ui: dc/RoleView: improve handling variable row height

  * fix #3440: external metrics: InfluxDB: remove duplicate 'vmid' tag

  * various preparatory fixes for ExtJS 7

 -- Proxmox Support Team <support@proxmox.com> Thu, 27 May 2021 14:28:35 +0200

pve-manager (6.4-7) pve; urgency=medium

  * pvereport: log pressure stall information
debian/control (vendored), 3 changed lines
@@ -20,6 +20,7 @@ Build-Depends: debhelper (>= 11~),
pve-cluster,
pve-container,
pve-doc-generator (>= 6.4-1),
pve-eslint,
qemu-server (>= 6.0-15),
unzip,
Maintainer: Proxmox Support Team <support@proxmox.com>

@@ -68,7 +69,7 @@ Depends: apt-transport-https | apt (>= 1.5~),
perl (>= 5.10.0-19),
postfix | mail-transport-agent,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 2.5-2),
proxmox-widget-toolkit (>= 2.6-2),
pve-cluster (>= 6.0-4),
pve-container (>= 2.0-21),
pve-docs,
@@ -2,8 +2,8 @@
Description=Daily PVE download activities

[Timer]
OnCalendar=*-*-* 2:00
RandomizedDelaySec=4h
OnCalendar=*-*-* 1:00
RandomizedDelaySec=5h
Persistent=true

[Install]
@@ -486,7 +486,7 @@ div.right-aligned {
background-color: #FFCC00;
}

.x-treelist-nav {
.x-treelist-pve-nav {
background-color: #f5f5f5;
}
@@ -367,9 +367,9 @@ Ext.define('PVE.Parser', {
return undefined;
}

const [, storage] = res.file.match(/^([a-z][a-z0-9\-_.]*[a-z0-9]):/i);
if (storage) {
res.storage = storage;
const match = res.file.match(/^([a-z][a-z0-9\-_.]*[a-z0-9]):/i);
if (match) {
res.storage = match[1];
res.type = 'volume';
} else if (res.file.match(/^\/dev\//)) {
res.type = 'device';

@@ -314,6 +314,16 @@ Ext.define('PVE.StdWorkspace', {
{
flex: 1,
},
{
xtype: 'proxmoxEOLNotice',
product: 'Proxmox VE',
version: '6.4',
eolDate: '2022-07-31',
href: 'pve.proxmox.com/wiki/FAQ#faq-support-table',
},
{
flex: 1,
},
{
xtype: 'proxmoxHelpButton',
hidden: false,

@@ -86,7 +86,7 @@ Ext.define('PVE.node.CephStatus', {
renderer: function(value) {
let health = PVE.Utils.map_ceph_health[value];
let icon = PVE.Utils.get_health_icon(health);
return `<i class="fa fa-fw ${icon}'"></i>`;
return `<i class="fa fa-fw ${icon}"></i>`;
},
sorter: {
sorterFn: function(a, b) {

@@ -515,8 +515,8 @@ Ext.define('PVE.dc.BackupDiskTree', {
data = record.parentNode.data;
}

for (const property in ['name', 'id', 'type']) {
if (data[property] === null) {
for (const property of ['name', 'id', 'type']) {
if (!data[property]) {
continue;
}
let v = data[property].toString();

@@ -71,6 +71,7 @@ Ext.define('PVE.dc.RoleView', {
metaData.style = 'white-space:normal;'; // allow word wrap
return value.replace(/,/g, ' ');
},
variableRowHeight: true,
dataIndex: 'privs',
flex: 1,
},

@@ -29,7 +29,7 @@ Ext.define('PVE.window.TFAEdit', {
'otpauth://totp/' +
encodeURIComponent(values.issuer) +
':' +
encodeURIComponent(values.userid) +
encodeURIComponent(me.userid) +
'?secret=' + values.secret +
'&period=' + values.step +
'&digits=' + values.digits +

@@ -5,7 +5,6 @@ Ext.define('PVE.form.CalendarEvent', {
editable: true,

valueField: 'value',
displayField: 'text',
queryMode: 'local',

store: {

@@ -138,14 +138,17 @@ Ext.define('PVE.form.VMCPUFlagSelector', {
boxLabel: '-',
boxLabelAlign: 'before',
inputValue: '-',
isFormField: false,
},
{
checked: true,
inputValue: '=',
isFormField: false,
},
{
boxLabel: '+',
inputValue: '+',
isFormField: false,
},
],
},

@@ -125,14 +125,13 @@ Ext.define('PVE.grid.ResourceGrid', {
var ws = me.up('pveStdWorkspace');
ws.selectById(record.data.id);
},
destroy: function() {
rstore.un("load", () => updateGrid());
afterrender: function() {
updateGrid();
},
},
columns: rstore.defaultColumns(),
});
me.callParent();
updateGrid();
rstore.on("load", () => updateGrid());
me.mon(rstore, 'load', () => updateGrid());
},
});

@@ -67,6 +67,7 @@ Ext.define('PVE.ha.ResourcesView', {
},
},
{
xtype: 'proxmoxButton',
text: gettext('Edit'),
disabled: true,
selModel: sm,

@@ -61,13 +61,16 @@ Ext.define('PVE.panel.Config', {
items: {
xtype: 'treelist',
itemId: 'menu',
ui: 'nav',
ui: 'pve-nav',
expanderOnly: true,
expanderFirst: false,
animation: false,
singleExpand: false,
listeners: {
selectionchange: function(treeList, selection) {
if (!selection) {
return;
}
let view = this.up('panel');
view.suspendLayout = true;
view.activateCard(selection.data.id);

@@ -3,8 +3,6 @@ Ext.define('PVE.panel.GuestStatusView', {
alias: 'widget.pveGuestStatusView',
mixins: ['Proxmox.Mixin.CBind'],

height: 300,

cbindData: function(initialConfig) {
var me = this;
return {

@@ -54,6 +54,7 @@ Ext.define('PVE.qemu.Summary', {
items = [
{
xtype: 'container',
height: 300,
layout: {
type: 'hbox',
align: 'stretch',

@@ -21,8 +21,9 @@ Ext.define('PVE.qemu.CmdMenu', {
failure: (response, opts) => Ext.Msg.alert(gettext('Error'), response.htmlStatus),
});
};
let confirmedVMCommand = (cmd, params) => {
let msg = Proxmox.Utils.format_task_description(`qm${cmd}`, info.vmid);
let confirmedVMCommand = (cmd, params, confirmTask) => {
let task = confirmTask || `qm${cmd}`;
let msg = Proxmox.Utils.format_task_description(task, info.vmid);
Ext.Msg.confirm(gettext('Confirm'), msg, btn => {
if (btn === 'yes') {
vm_command(cmd, params);

@@ -65,7 +66,7 @@ Ext.define('PVE.qemu.CmdMenu', {
iconCls: 'fa fa-fw fa-pause',
hidden: stopped || suspended,
disabled: stopped || suspended,
handler: () => confirmedVMCommand('suspend'),
handler: () => confirmedVMCommand('suspend', undefined, 'qmpause'),
},
{
text: gettext('Hibernate'),

@@ -512,6 +512,7 @@ Ext.define('PVE.storage.PBSInputPanel', {
emptyText: gettext('Server certificate SHA-256 fingerprint, required for self-signed certificates'),
regex: /[A-Fa-f0-9]{2}(:[A-Fa-f0-9]{2}){31}/,
regexText: gettext('Example') + ': AB:CD:EF:...',
deleteEmpty: !me.isCreate,
allowBlank: true,
},
];

@@ -229,7 +229,7 @@ Ext.define('PVE.guest.SnapshotTree', {
listeners: {
selectionchange: 'select',
itemdblclick: 'editSnapshot',
destroy: 'cancel',
beforedestroy: 'cancel',
},

layout: 'fit',