Merge commit '5d24b7d37eb43fe782aaac8e2cf3f3765a6aaecd'

This commit is contained in:
Алексей Шабалин 2024-10-17 17:18:53 +03:00
commit a3ae6230fa
56 changed files with 1092 additions and 282 deletions

View File

@ -348,7 +348,8 @@ __PACKAGE__->register_method({
# matchers.
my $metadata_fields = {
type => 'package-updates',
hostname => $hostname,
# Hostname (without domain part)
hostname => PVE::INotify::nodename(),
};
PVE::Notify::info(

View File

@ -11,7 +11,7 @@ use PVE::Tools qw(extract_param);
use PVE::Cluster qw(cfs_lock_file cfs_read_file cfs_write_file);
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::JSONSchema;
use PVE::JSONSchema qw(get_standard_option);
use PVE::Storage;
use PVE::Exception qw(raise_param_exc);
use PVE::VZDump;
@ -59,7 +59,7 @@ sub assert_param_permission_common {
my ($rpcenv, $user, $param, $is_delete) = @_;
return if $user eq 'root@pam'; # always OK
for my $key (qw(tmpdir dumpdir script)) {
for my $key (qw(tmpdir dumpdir script job-id)) {
raise_param_exc({ $key => "Only root may set this option."}) if exists $param->{$key};
}
@ -156,7 +156,7 @@ __PACKAGE__->register_method({
items => {
type => "object",
properties => {
id => $vzdump_job_id_prop
id => get_standard_option('pve-backup-jobid'),
},
},
links => [ { rel => 'child', href => "{id}" } ],
@ -312,7 +312,7 @@ __PACKAGE__->register_method({
parameters => {
additionalProperties => 0,
properties => {
id => $vzdump_job_id_prop
id => get_standard_option('pve-backup-jobid'),
},
},
returns => {
@ -360,7 +360,7 @@ __PACKAGE__->register_method({
parameters => {
additionalProperties => 0,
properties => {
id => $vzdump_job_id_prop
id => get_standard_option('pve-backup-jobid'),
},
},
returns => { type => 'null' },
@ -427,7 +427,7 @@ __PACKAGE__->register_method({
parameters => {
additionalProperties => 0,
properties => PVE::VZDump::Common::json_config_properties({
id => $vzdump_job_id_prop,
id => get_standard_option('pve-backup-jobid'),
schedule => {
description => "Backup schedule. The format is a subset of `systemd` calendar events.",
type => 'string', format => 'pve-calendar-event',
@ -586,7 +586,7 @@ __PACKAGE__->register_method({
parameters => {
additionalProperties => 0,
properties => {
id => $vzdump_job_id_prop
id => get_standard_option('pve-backup-jobid'),
},
},
returns => {

View File

@ -133,6 +133,8 @@ __PACKAGE__->register_method ({
my $mds_id = $param->{name} // $nodename;
die "ID of the MDS cannot start with a number!\n" if ($mds_id =~ /^[0-9]/);
my $worker = sub {
my $timeout = PVE::Ceph::Tools::get_config('long_rados_timeout');
my $rados = PVE::RADOS->new(timeout => $timeout);

View File

@ -323,7 +323,7 @@ __PACKAGE__->register_method ({
# 'ceph-volume lvm batch' and they don't make a lot of sense on fast NVMEs anyway.
if ($param->{'osds-per-device'}) {
for my $type ( qw(db_dev wal_dev) ) {
raise_param_exc({ $type => "canot use 'osds-per-device' parameter with '${type}'" })
raise_param_exc({ $type => "cannot use 'osds-per-device' parameter with '${type}'" })
if $param->{$type};
}
}

View File

@ -6,8 +6,11 @@ use strict;
use PVE::Tools qw(extract_param extract_sensitive_params);
use PVE::Exception qw(raise_perm_exc raise_param_exc);
use PVE::JSONSchema qw(get_standard_option);
use PVE::INotify;
use PVE::RPCEnvironment;
use PVE::ExtMetric;
use PVE::PullMetric;
use PVE::SafeSyslog;
use PVE::RESTHandler;
@ -288,4 +291,184 @@ __PACKAGE__->register_method ({
return;
}});
# GET /cluster/metrics/export: return recent (and optionally historic)
# system metrics. Unless 'local-only' is set, the handler fans out to all
# other online cluster members over their API (always with 'local-only'
# set, to avoid request recursion) and merges their results into one
# timestamp-sorted list.
__PACKAGE__->register_method ({
name => 'export',
path => 'export',
method => 'GET',
protected => 1,
description => "Retrieve metrics of the cluster.",
permissions => {
check => ['perm', '/', ['Sys.Audit']],
},
parameters => {
additionalProperties => 0,
properties => {
'local-only' => {
type => 'boolean',
description =>
'Only return metrics for the current node instead of the whole cluster',
optional => 1,
default => 0,
},
'start-time' => {
type => 'integer',
description => 'Only include metrics with a timestamp > start-time.',
optional => 1,
default => 0,
},
'history' => {
type => 'boolean',
description => 'Also return historic values.'
. ' Returns full available metric history unless `start-time` is also set',
optional => 1,
default => 0,
},
},
},
returns => {
type => 'object',
additionalProperties => 0,
properties => {
data => {
type => 'array',
description => 'Array of system metrics. Metrics are sorted by their timestamp.',
items => {
type => 'object',
additionalProperties => 0,
properties => {
timestamp => {
type => 'integer',
description => 'Time at which this metric was observed',
},
id => {
type => 'string',
description => "Unique identifier for this metric object,"
. " for instance 'node/<nodename>' or"
. " 'qemu/<vmid>'."
},
metric => {
type => 'string',
description => "Name of the metric.",
},
value => {
type => 'number',
description => 'Metric value.',
},
type => {
type => 'string',
description => 'Type of the metric.',
enum => [qw(gauge counter derive)],
}
}
},
},
}
},
code => sub {
my ($param) = @_;
my $local_only = $param->{'local-only'} // 0;
my $start = $param->{'start-time'};
my $history = $param->{'history'} // 0;
my $now = time();
# Translate the requested time window into a number of cache
# generations to fetch from the local metrics cache.
my $generations;
if ($history) {
# Assuming update loop time of pvestatd of 10 seconds.
if (defined($start)) {
my $delta = $now - $start;
$generations = int($delta / 10);
} else {
$generations = PVE::PullMetric::max_generations();
}
} else {
$generations = 0;
};
my @metrics = @{PVE::PullMetric::get_local_metrics($generations)};
# The generation count above is only an estimate, so filter exactly
# by the requested start time afterwards.
if (defined($start)) {
@metrics = grep {
$_->{timestamp} > ($start)
} @metrics;
}
my $nodename = PVE::INotify::nodename();
# Fan out to cluster members
# Do NOT remove this check
if (!$local_only) {
my $members = PVE::Cluster::get_members();
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();
# If the request was made with an API token, request the other
# nodes with a ticket for the token's owning user instead.
my ($user, undef) = PVE::AccessControl::split_tokenid($authuser, 1);
my $ticket;
if ($user) {
# Theoretically, we might now bypass token privilege separation, since
# we use the regular user instead of the token, but
# since we already passed the permission check for this handler,
# this should be fine.
$ticket = PVE::AccessControl::assemble_ticket($user);
} else {
$ticket = PVE::AccessControl::assemble_ticket($authuser);
}
for my $name (keys %$members) {
if ($name eq $nodename) {
# Skip own node, for that one we already have the metrics
next;
}
if (!$members->{$name}->{online}) {
next;
}
# Best-effort per node: a failing member is logged and skipped
# rather than failing the whole request.
my $status = eval {
my $fingerprint = PVE::Cluster::get_node_fingerprint($name);
my $ip = scalar(PVE::Cluster::remote_node_ip($name));
my $conn_args = {
protocol => 'https',
host => $ip,
port => 8006,
ticket => $ticket,
timeout => 5,
};
$conn_args->{cached_fingerprints} = { $fingerprint => 1 };
my $api_client = PVE::APIClient::LWP->new(%$conn_args);
my $params = {
# Do NOT remove 'local-only' - potential for request recursion!
'local-only' => 1,
history => $history,
};
$params->{'start-time'} = $start if defined($start);
$api_client->get('/cluster/metrics/export', $params);
};
if ($@) {
syslog('warning', "could not fetch metrics from $name: $@");
} else {
push @metrics, $status->{data}->@*;
}
}
}
# Present one flat, timestamp-sorted list to the caller.
my @sorted = sort {$a->{timestamp} <=> $b->{timestamp}} @metrics;
return {
data => \@sorted,
};
},
});
1;

View File

@ -79,12 +79,151 @@ __PACKAGE__->register_method ({
{ name => 'endpoints' },
{ name => 'matchers' },
{ name => 'targets' },
{ name => 'matcher-fields' },
{ name => 'matcher-field-values' },
];
return $result;
}
});
# GET /cluster/notifications/matcher-fields: list the metadata field
# names that notification matchers can match against.
__PACKAGE__->register_method ({
    name => 'get_matcher_fields',
    path => 'matcher-fields',
    method => 'GET',
    description => 'Returns known notification metadata fields',
    permissions => {
        check => ['or',
            ['perm', '/mapping/notifications', ['Mapping.Modify']],
            ['perm', '/mapping/notifications', ['Mapping.Audit']],
        ],
    },
    protected => 0,
    parameters => {
        additionalProperties => 0,
        properties => {},
    },
    returns => {
        type => 'array',
        items => {
            type => 'object',
            properties => {
                name => {
                    description => 'Name of the field.',
                    type => 'string',
                },
            },
        },
        links => [ { rel => 'child', href => '{name}' } ],
    },
    code => sub {
        # TODO: Adapt this API handler once we have a 'notification registry'
        # Static list for now; keep in sync with the fields the various
        # notification call sites actually set.
        return [ map { { name => $_ } } qw(type hostname job-id) ];
    }
});
# GET /cluster/notifications/matcher-field-values: list the values the
# system currently knows for each notification metadata field, so the UI
# can offer completion when editing matchers.
__PACKAGE__->register_method ({
name => 'get_matcher_field_values',
path => 'matcher-field-values',
method => 'GET',
description => 'Returns known notification metadata fields and their known values',
permissions => {
check => ['or',
['perm', '/mapping/notifications', ['Mapping.Modify']],
['perm', '/mapping/notifications', ['Mapping.Audit']],
],
},
protected => 1,
parameters => {
additionalProperties => 0,
},
returns => {
type => 'array',
items => {
type => 'object',
properties => {
'value' => {
description => 'Notification metadata value known by the system.',
type => 'string'
},
'comment' => {
description => 'Additional comment for this value.',
type => 'string',
optional => 1,
},
'field' => {
description => 'Field this value belongs to.',
type => 'string',
},
},
},
},
code => sub {
# TODO: Adapt this API handler once we have a 'notification registry'
my $rpcenv = PVE::RPCEnvironment::get();
my $user = $rpcenv->get_user();
# The 'type' values are fixed; keep in sync with the notification
# call sites that set the 'type' metadata field.
my $values = [
{
value => 'package-updates',
field => 'type',
},
{
value => 'fencing',
field => 'type',
},
{
value => 'replication',
field => 'type',
},
{
value => 'vzdump',
field => 'type',
},
{
value => 'system-mail',
field => 'type',
},
];
# Here we need a manual permission check.
if ($rpcenv->check($user, "/", ["Sys.Audit"], 1)) {
for my $backup_job (@{PVE::API2::Backup->index({})}) {
push @$values, {
value => $backup_job->{id},
comment => $backup_job->{comment},
field => 'job-id'
};
}
}
# The index API call only returns jobs for which the user
# has adequate permissions, so no extra check is needed here.
for my $sync_job (@{PVE::API2::ReplicationConfig->index({})}) {
push @$values, {
value => $sync_job->{id},
comment => $sync_job->{comment},
field => 'job-id'
};
}
for my $node (@{PVE::Cluster::get_nodelist()}) {
push @$values, {
value => $node,
field => 'hostname',
}
}
return $values;
}
});
__PACKAGE__->register_method ({
name => 'endpoints_index',
path => 'endpoints',

View File

@ -399,7 +399,7 @@ __PACKAGE__->register_method({
type => "object",
additionalProperties => 1,
properties => {
# TODO: document remaing ones
# TODO: document remaining ones
'boot-info' => {
description => "Meta-information about the boot mode.",
type => 'object',
@ -417,7 +417,7 @@ __PACKAGE__->register_method({
},
},
'current-kernel' => {
description => "The uptime of the system in seconds.",
description => "Meta-information about the currently booted kernel of this node.",
type => 'object',
properties => {
sysname => {
@ -906,6 +906,7 @@ __PACKAGE__->register_method({
check => ['perm', '/nodes/{node}', [ 'Sys.Syslog' ]],
},
protected => 1,
download_allowed => 1,
parameters => {
additionalProperties => 0,
properties => {

View File

@ -123,8 +123,10 @@ my sub _handle_job_err {
};
my $metadata_fields = {
# TODO: Add job-id?
type => "replication",
"job-id" => $job->{id},
# Hostname (without domain part)
hostname => PVE::INotify::nodename(),
};
eval {

View File

@ -327,6 +327,7 @@ __PACKAGE__->register_method({
user => 'all',
},
protected => 1,
download_allowed => 1,
description => "Read task log.",
proxyto => 'node',
parameters => {

View File

@ -42,8 +42,8 @@ __PACKAGE__->register_method ({
permissions => {
description => "The user needs 'VM.Backup' permissions on any VM, and "
."'Datastore.AllocateSpace' on the backup storage (and fleecing storage when fleecing "
."is used). The 'tmpdir', 'dumpdir' and 'script' parameters are restricted to the "
."'root\@pam' user. The 'maxfiles' and 'prune-backups' settings require "
."is used). The 'tmpdir', 'dumpdir', 'script' and 'job-id' parameters are restricted "
."to the 'root\@pam' user. The 'maxfiles' and 'prune-backups' settings require "
."'Datastore.Allocate' on the backup storage. The 'bwlimit', 'performance' and "
."'ionice' parameters require 'Sys.Modify' on '/'.",
user => 'all',
@ -53,6 +53,12 @@ __PACKAGE__->register_method ({
parameters => {
additionalProperties => 0,
properties => PVE::VZDump::Common::json_config_properties({
'job-id' => get_standard_option('pve-backup-jobid', {
description => "The ID of the backup job. If set, the 'backup-job' metadata field"
. " of the backup notification will be set to this value. Only root\@pam"
. " can set this parameter.",
optional => 1,
}),
stdout => {
type => 'boolean',
description => "Write tar to stdout, not to a file.",

View File

@ -84,9 +84,9 @@ sub read_aplinfo_from_fh {
my $template;
if ($res->{location}) {
$template = $res->{location};
$template =~ s|.*/([^/]+$PVE::Storage::vztmpl_extension_re)$|$1|;
$template =~ s|.*/([^/]+$PVE::Storage::VZTMPL_EXT_RE_1)$|$1|;
if ($res->{location} !~ m|^([a-zA-Z]+)\://|) {
# relative localtion (no http:// prefix)
# relative location (no http:// prefix)
$res->{location} = "$source/$res->{location}";
}
} else {

View File

@ -204,17 +204,30 @@ sub check_pve_packages {
}
# FIXME: better differentiate between 6.2 from bullseye or bookworm
my ($krunning, $kinstalled) = (qr/6\.(?:2\.(?:[2-9]\d+|1[6-8]|1\d\d+)|5)[^~]*$/, 'proxmox-kernel-6.2');
my $kinstalled = 'proxmox-kernel-6.2';
if (!$upgraded) {
# we got a few that avoided 5.15 in cluster with mixed CPUs, so allow older too
($krunning, $kinstalled) = (qr/(?:5\.(?:13|15)|6\.2)/, 'pve-kernel-5.15');
$kinstalled = 'pve-kernel-5.15';
}
# Whether a given running-kernel version string is acceptable for the
# current upgrade state ($upgraded is set by the surrounding check).
my $kernel_version_is_expected = sub {
    my ($version) = @_;
    # Before the upgrade, 5.13/5.15 (and the opt-in 6.2) kernels are fine.
    return $version =~ m/^(?:5\.(?:13|15)|6\.2)/ if !$upgraded;
    if ($version =~ m/^6\.(?:2\.(?:[2-9]\d+|1[6-8]|1\d\d+)|5)[^~]*$/) {
        return 1;
    } elsif ($version =~ m/^(\d+)\.(\d+)[^~]*-pve$/) {
        # FIX: escape the '.' between major and minor version - a bare '.'
        # matches any character and would accept bogus version strings.
        return $1 >= 6 && $2 >= 2;
    }
    return 0;
};
print "\nChecking running kernel version..\n";
my $kernel_ver = $proxmox_ve->{RunningKernel};
if (!defined($kernel_ver)) {
log_fail("unable to determine running kernel version.");
} elsif ($kernel_ver =~ /^$krunning/) {
} elsif ($kernel_version_is_expected->($kernel_ver)) {
if ($upgraded) {
log_pass("running new kernel '$kernel_ver' after upgrade.");
} else {
@ -222,12 +235,12 @@ sub check_pve_packages {
}
} elsif ($get_pkg->($kinstalled)) {
# with 6.2 kernel being available in both we might want to fine-tune the check?
log_warn("a suitable kernel ($kinstalled) is intalled, but an unsuitable ($kernel_ver) is booted, missing reboot?!");
log_warn("a suitable kernel ($kinstalled) is installed, but an unsuitable ($kernel_ver) is booted, missing reboot?!");
} else {
log_warn("unexpected running and installed kernel '$kernel_ver'.");
}
if ($upgraded && $kernel_ver =~ /^$krunning/) {
if ($upgraded && $kernel_version_is_expected->($kernel_ver)) {
my $outdated_kernel_meta_pkgs = [];
for my $kernel_meta_version ('5.4', '5.11', '5.13', '5.15') {
my $pkg = "pve-kernel-${kernel_meta_version}";

View File

@ -216,7 +216,7 @@ __PACKAGE__->register_method ({
print "start installation\n";
# this flag helps to determine when apt is actually done installing (vs. partial extracing)
# this flag helps to determine when apt is actually done installing (vs. partial extracting)
my $install_flag_fn = PVE::Ceph::Tools::ceph_install_flag_file();
open(my $install_flag, '>', $install_flag_fn) or die "could not create install flag - $!\n";
close $install_flag;

View File

@ -351,7 +351,12 @@ sub call_api_method {
$data = $handler->handle($info, $param);
if (ref($data) eq 'HASH' && ref($data->{download}) eq 'HASH') {
# TODO: remove 'download' check with PVE 9.0
if (
ref($data) eq 'HASH'
&& ref($data->{download}) eq 'HASH'
&& ($info->{download_allowed} || $info->{download})
) {
$data = $handle_streamed_response->($data->{download})
}
}

View File

@ -50,28 +50,26 @@ sub broadcast_ceph_services {
sub broadcast_ceph_versions {
my ($version, $buildcommit, $vers_parts) = PVE::Ceph::Tools::get_local_version(1);
if ($version) {
my $nodename = PVE::INotify::nodename();
my $old = PVE::Cluster::get_node_kv("ceph-versions", $nodename);
if (defined($old->{$nodename})) {
$old = eval { decode_json($old->{$nodename}) };
warn $@ if $@; # should not happen
if (defined($old) && $old->{buildcommit} eq $buildcommit && $old->{version}->{str} eq $version) {
return; # up to date, nothing to do so avoid (not exactly cheap) broadcast
}
}
# FIXME: remove with 8.0 (or 7.2, its not _that_ bad) - for backward compat only
PVE::Cluster::broadcast_node_kv("ceph-version", $version);
return undef if !$version;
my $node_versions = {
version => {
str => $version,
parts => $vers_parts,
},
buildcommit => $buildcommit,
};
PVE::Cluster::broadcast_node_kv("ceph-versions", encode_json($node_versions));
my $nodename = PVE::INotify::nodename();
my $old = PVE::Cluster::get_node_kv("ceph-versions", $nodename);
if (defined($old->{$nodename})) {
$old = eval { decode_json($old->{$nodename}) };
warn $@ if $@; # should not happen
if (defined($old) && $old->{buildcommit} eq $buildcommit && $old->{version}->{str} eq $version) {
return; # up to date, nothing to do so avoid (not exactly cheap) broadcast
}
}
my $node_versions = {
version => {
str => $version,
parts => $vers_parts,
},
buildcommit => $buildcommit,
};
PVE::Cluster::broadcast_node_kv("ceph-versions", encode_json($node_versions));
}
sub get_ceph_versions {

View File

@ -57,24 +57,47 @@ my $config_files = {
sub get_local_version {
my ($noerr) = @_;
if (check_ceph_installed('ceph_bin', $noerr)) {
my $ceph_version;
run_command(
[ $ceph_service->{ceph_bin}, '--version' ],
noerr => $noerr,
outfunc => sub { $ceph_version = shift if !defined $ceph_version },
);
return undef if !defined $ceph_version;
return undef if !check_ceph_installed('ceph_bin', $noerr);
if ($ceph_version =~ /^ceph.*\sv?(\d+(?:\.\d+)+(?:-pve\d+)?)\s+(?:\(([a-zA-Z0-9]+)\))?/) {
my ($version, $buildcommit) = ($1, $2);
my $subversions = [ split(/\.|-/, $version) ];
my $ceph_version;
run_command(
[ $ceph_service->{ceph_bin}, '--version' ],
noerr => $noerr,
outfunc => sub { $ceph_version = shift if !defined $ceph_version },
);
# return (version, buildid, major, minor, ...) : major;
return wantarray
? ($version, $buildcommit, $subversions)
: $subversions->[0];
}
return undef if !defined $ceph_version;
my ($version, $buildcommit, $subversions) = parse_ceph_version($ceph_version);
return undef if !defined($version);
# return (version, buildid, [major, minor, ...]) : major;
return wantarray ? ($version, $buildcommit, $subversions) : $subversions->[0];
}
sub parse_ceph_version : prototype($) {
my ($ceph_version) = @_;
my $re_ceph_version = qr/
# Skip ahead to the version, which may optionally start with 'v'
^ceph.*\sv?
# Parse the version X.Y, X.Y.Z, etc.
( \d+ (?:\.\d+)+ ) \s+
# Parse the git commit hash between parentheses
(?: \( ([a-zA-Z0-9]+) \) )
/x;
if ($ceph_version =~ /$re_ceph_version/) {
my ($version, $buildcommit) = ($1, $2);
my $subversions = [ split(/\.|-/, $version) ];
# return (version, buildid, [major, minor, ...]) : major;
return wantarray
? ($version, $buildcommit, $subversions)
: $subversions->[0];
}
return undef;
@ -262,9 +285,17 @@ sub set_pool {
my $keys = [ grep { $_ ne 'size' } sort keys %$param ];
unshift @$keys, 'size' if exists $param->{size};
my $current_properties = get_pool_properties($pool, $rados);
for my $setting (@$keys) {
my $value = $param->{$setting};
if (defined($current_properties->{$setting}) && $value eq $current_properties->{$setting}) {
print "skipping '${setting}', did not change\n";
delete $param->{$setting};
next;
}
print "pool $pool: applying $setting = $value\n";
if (my $err = $set_pool_setting->($pool, $setting, $value, $rados)) {
print "$err";

View File

@ -12,7 +12,7 @@ use PVE::API2::VZDump;
use base qw(PVE::VZDump::JobBase);
sub run {
my ($class, $conf) = @_;
my ($class, $conf, $job_id) = @_;
my $props = $class->properties();
# remove all non vzdump related options
@ -20,6 +20,8 @@ sub run {
delete $conf->{$opt} if !defined($props->{$opt});
}
$conf->{'job-id'} = $job_id;
# Required as string parameters # FIXME why?! we could just check ref()
for my $key (keys $PVE::VZDump::Common::PROPERTY_STRINGS->%*) {
if ($conf->{$key} && ref($conf->{$key}) eq 'HASH') {

View File

@ -13,6 +13,7 @@ PERLSOURCE = \
HTTPServer.pm \
Jobs.pm \
NodeConfig.pm \
PullMetric.pm \
Report.pm \
VZDump.pm

225
PVE/PullMetric.pm Normal file
View File

@ -0,0 +1,225 @@
package PVE::PullMetric;
use strict;
use warnings;
use Proxmox::RS::SharedCache;
use PVE::Network;
use constant OLD_GENERATIONS => 180;
use constant LOCK_TIMEOUT => 2;
# Lazily-created singleton handle for the shared metrics cache.
my $cache;
# Return the SharedCache instance, creating it on first use.
# The cache lives in /run/pve/metrics, is owned by root:www-data and
# keeps OLD_GENERATIONS old entries in addition to the most recent one.
my $get_cache = sub {
if (!defined($cache)) {
my $uid = getpwnam('root');
my $gid = getgrnam('www-data');
$cache = Proxmox::RS::SharedCache->new({
path => "/run/pve/metrics",
owner => $uid,
group => $gid,
entry_mode => 0640, # Entry permissions: rw for root, read-only for www-data
keep_old => OLD_GENERATIONS,
}
);
}
return $cache;
};
# Total number of metric generations the cache can provide: every
# retained historic generation plus the current one.
sub max_generations {
    return 1 + OLD_GENERATIONS;
}
# Begin a new metrics transaction.
#
# Returns an empty hash ref that update() fills with per-subsystem data
# and that transaction_finish() persists to the cache.
sub transaction_start {
return {};
}
# Finish a metrics transaction: persist the collected snapshot as the
# newest cache generation.
#
# Parameters:
#   $txn - transaction hash ref created by transaction_start() and
#          filled via update()
sub transaction_finish {
    my ($txn) = @_;
    # Use the declared LOCK_TIMEOUT constant (seconds) instead of the
    # magic literal '2', which silently duplicated its value.
    $get_cache->()->set($txn, LOCK_TIMEOUT);
}
# Record the most recent stats snapshot for one subsystem (e.g. 'node',
# 'qemu', 'lxc', 'storage') in the given transaction.
sub update {
    my ($txn, $subsystem, $data, $timestamp) = @_;
    my $entry = $txn->{$subsystem} //= {};
    $entry->{data} = $data;
    $entry->{timestamp} = $timestamp;
}
# Build one gauge-type metric record; timestamp and value are coerced
# to numbers so a later JSON encoding emits them unquoted.
my sub gauge {
    my ($id, $timestamp, $metric, $value) = @_;
    return {
        id => $id,
        metric => $metric,
        value => 0 + $value,
        timestamp => 0 + $timestamp,
        type => 'gauge',
    };
}
# Build one derive-type metric record (monotonic counter whose rate is
# of interest); timestamp and value are coerced to numbers.
my sub derive {
    my ($id, $timestamp, $metric, $value) = @_;
    return {
        id => $id,
        metric => $metric,
        value => 0 + $value,
        timestamp => 0 + $timestamp,
        type => 'derive',
    };
}
my $nodename = PVE::INotify::nodename();
# Build the metric list for the local node from one stats snapshot.
# $stats is expected to carry {data} (node status hash) and {timestamp},
# as stored by update() — assumes the pvestatd node-status schema; TODO
# confirm against the pvestatd caller.
my sub get_node_metrics {
my ($stats) = @_;
my $metrics = [];
my $data = $stats->{data};
my $timestamp = $stats->{timestamp};
my $id = "node/$nodename";
push @$metrics, gauge($id, $timestamp, "uptime", $data->{uptime});
# Sum traffic counters over NICs matching PHYSICAL_NIC_RE only, then
# report the totals as derive metrics.
my ($netin, $netout) = (0, 0);
for my $dev (grep { /^$PVE::Network::PHYSICAL_NIC_RE$/ } keys $data->{nics}->%*) {
$netin += $data->{nics}->{$dev}->{receive};
$netout += $data->{nics}->{$dev}->{transmit};
}
push @$metrics, derive($id, $timestamp, "net_in", $netin);
push @$metrics, derive($id, $timestamp, "net_out", $netout);
# CPU load averages and utilization, as reported in the snapshot.
my $cpustat = $data->{cpustat};
push @$metrics, gauge($id, $timestamp, "cpu_avg1", $cpustat->{avg1});
push @$metrics, gauge($id, $timestamp, "cpu_avg5", $cpustat->{avg5});
push @$metrics, gauge($id, $timestamp, "cpu_avg15", $cpustat->{avg15});
push @$metrics, gauge($id, $timestamp, "cpu_max", $cpustat->{cpus});
push @$metrics, gauge($id, $timestamp, "cpu_current", $cpustat->{cpu});
push @$metrics, gauge($id, $timestamp, "cpu_iowait", $cpustat->{iowait});
my $memory = $data->{memory};
push @$metrics, gauge($id, $timestamp, "mem_total", $memory->{memtotal});
push @$metrics, gauge($id, $timestamp, "mem_used", $memory->{memused});
push @$metrics, gauge($id, $timestamp, "swap_total", $memory->{swaptotal});
push @$metrics, gauge($id, $timestamp, "swap_used", $memory->{swapused});
# Root filesystem usage: used = total blocks minus free blocks.
my $blockstat = $data->{blockstat};
my $dused = $blockstat->{blocks} - $blockstat->{bfree};
push @$metrics, gauge($id, $timestamp, "disk_total", $blockstat->{blocks});
push @$metrics, gauge($id, $timestamp, "disk_used", $dused);
return $metrics;
}
# Build the metric list for all local QEMU guests from one vmstatus
# snapshot ({data} keyed by VMID, plus {timestamp}).
my sub get_qemu_metrics {
    my ($stats) = @_;
    my $ts = $stats->{timestamp};
    my $result = [];
    for my $vmid (keys $stats->{data}->%*) {
        my $d = $stats->{data}->{$vmid};
        my $id = "qemu/$vmid";
        # Utilization and traffic only make sense for running guests.
        if ($d->{status} eq 'running') {
            push $result->@*, gauge($id, $ts, "cpu_current", $d->{cpu});
            push $result->@*, gauge($id, $ts, "mem_used", $d->{mem});
            push $result->@*, derive($id, $ts, "disk_read", $d->{diskread});
            push $result->@*, derive($id, $ts, "disk_write", $d->{diskwrite});
            push $result->@*, derive($id, $ts, "net_in", $d->{netin});
            push $result->@*, derive($id, $ts, "net_out", $d->{netout});
        }
        push $result->@*, gauge($id, $ts, "uptime", $d->{uptime});
        push $result->@*, gauge($id, $ts, "cpu_max", $d->{cpus});
        push $result->@*, gauge($id, $ts, "mem_total", $d->{maxmem});
        push $result->@*, gauge($id, $ts, "disk_total", $d->{maxdisk});
        # TODO: the 'disk' (used) value always seems to be 0 for QEMU, so
        # no disk_used metric is emitted here.
    }
    return $result;
}
# Build the metric list for all local LXC containers from one vmstatus
# snapshot ({data} keyed by VMID, plus {timestamp}).
my sub get_lxc_metrics {
    my ($stats) = @_;
    my $ts = $stats->{timestamp};
    my $result = [];
    for my $vmid (keys $stats->{data}->%*) {
        my $d = $stats->{data}->{$vmid};
        my $id = "lxc/$vmid";
        # Utilization, traffic and disk usage only exist for running CTs.
        if ($d->{status} eq 'running') {
            push $result->@*, gauge($id, $ts, "cpu_current", $d->{cpu});
            push $result->@*, gauge($id, $ts, "mem_used", $d->{mem});
            push $result->@*, derive($id, $ts, "disk_read", $d->{diskread});
            push $result->@*, derive($id, $ts, "disk_write", $d->{diskwrite});
            push $result->@*, derive($id, $ts, "net_in", $d->{netin});
            push $result->@*, derive($id, $ts, "net_out", $d->{netout});
            push $result->@*, gauge($id, $ts, "disk_used", $d->{disk});
        }
        push $result->@*, gauge($id, $ts, "uptime", $d->{uptime});
        push $result->@*, gauge($id, $ts, "cpu_max", $d->{cpus});
        push $result->@*, gauge($id, $ts, "mem_total", $d->{maxmem});
        push $result->@*, gauge($id, $ts, "disk_total", $d->{maxdisk});
    }
    return $result;
}
# Build metric records for all storages from one storage-status snapshot
# ({data} keyed by storage ID, plus {timestamp}). IDs are node-scoped
# since storage usage can differ per node.
my sub get_storage_metrics {
    my ($stats) = @_;
    my $ts = $stats->{timestamp};
    my $result = [];
    for my $sid (keys $stats->{data}->%*) {
        my $d = $stats->{data}->{$sid};
        my $id = "storage/$nodename/$sid";
        push $result->@*, gauge($id, $ts, "disk_total", $d->{total});
        push $result->@*, gauge($id, $ts, "disk_used", $d->{used});
    }
    return $result;
}
# Return the locally collected metrics as a flat array ref.
#
# $history selects how many additional (older) cache generations to
# include on top of the most recent one; undef/0 returns only the
# latest snapshot.
sub get_local_metrics {
    my ($history) = @_;
    my $generations = int($history // 0);
    my @metrics;
    for my $gen ($get_cache->()->get_last($generations)->@*) {
        push @metrics, get_node_metrics($gen->{node})->@*;
        push @metrics, get_qemu_metrics($gen->{qemu})->@*;
        push @metrics, get_lxc_metrics($gen->{lxc})->@*;
        push @metrics, get_storage_metrics($gen->{storage})->@*;
    }
    return \@metrics;
}
1;

View File

@ -31,6 +31,7 @@ use PVE::Ceph::Tools;
use PVE::pvecfg;
use PVE::ExtMetric;
use PVE::PullMetric;
use PVE::Status::Plugin;
use base qw(PVE::Daemon);
@ -103,7 +104,7 @@ sub update_supported_cpuflags {
$supported_cpuflags = {};
} else {
# only set cached version if there's actually something to braodcast
# only set cached version if there's actually something to broadcast
$cached_kvm_version = $kvm_version;
}
@ -147,7 +148,7 @@ my sub broadcast_static_node_info {
}
sub update_node_status {
my ($status_cfg) = @_;
my ($status_cfg, $pull_txn) = @_;
my ($uptime) = PVE::ProcFSTools::read_proc_uptime();
@ -200,6 +201,8 @@ sub update_node_status {
PVE::ExtMetric::update_all($transactions, 'node', $nodename, $node_metric, $ctime);
PVE::ExtMetric::transactions_finish($transactions);
PVE::PullMetric::update($pull_txn, 'node', $node_metric, $ctime);
broadcast_static_node_info($maxcpu, $meminfo->{memtotal});
}
@ -232,7 +235,7 @@ sub auto_balloning {
}
sub update_qemu_status {
my ($status_cfg) = @_;
my ($status_cfg, $pull_txn) = @_;
my $ctime = time();
my $vmstatus = PVE::QemuServer::vmstatus(undef, 1);
@ -262,6 +265,8 @@ sub update_qemu_status {
}
PVE::ExtMetric::transactions_finish($transactions);
PVE::PullMetric::update($pull_txn, 'qemu', $vmstatus, $ctime);
}
sub remove_stale_lxc_consoles {
@ -441,7 +446,7 @@ sub rebalance_lxc_containers {
}
sub update_lxc_status {
my ($status_cfg) = @_;
my ($status_cfg, $pull_txn) = @_;
my $ctime = time();
my $vmstatus = PVE::LXC::vmstatus();
@ -470,10 +475,12 @@ sub update_lxc_status {
PVE::ExtMetric::update_all($transactions, 'lxc', $vmid, $d, $ctime, $nodename);
}
PVE::ExtMetric::transactions_finish($transactions);
PVE::PullMetric::update($pull_txn, 'lxc', $vmstatus, $ctime);
}
sub update_storage_status {
my ($status_cfg) = @_;
my ($status_cfg, $pull_txn) = @_;
my $cfg = PVE::Storage::config();
my $ctime = time();
@ -493,6 +500,8 @@ sub update_storage_status {
PVE::ExtMetric::update_all($transactions, 'storage', $nodename, $storeid, $d, $ctime);
}
PVE::ExtMetric::transactions_finish($transactions);
PVE::PullMetric::update($pull_txn, 'storage', $info, $ctime);
}
sub rotate_authkeys {
@ -533,6 +542,8 @@ sub update_status {
# correct list in case of an unexpected crash.
my $rpcenv = PVE::RPCEnvironment::get();
my $pull_txn = PVE::PullMetric::transaction_start();
eval {
my $tlist = $rpcenv->active_workers();
PVE::Cluster::broadcast_tasklist($tlist);
@ -543,19 +554,19 @@ sub update_status {
my $status_cfg = PVE::Cluster::cfs_read_file('status.cfg');
eval {
update_node_status($status_cfg);
update_node_status($status_cfg, $pull_txn);
};
$err = $@;
syslog('err', "node status update error: $err") if $err;
eval {
update_qemu_status($status_cfg);
update_qemu_status($status_cfg, $pull_txn);
};
$err = $@;
syslog('err', "qemu status update error: $err") if $err;
eval {
update_lxc_status($status_cfg);
update_lxc_status($status_cfg, $pull_txn);
};
$err = $@;
syslog('err', "lxc status update error: $err") if $err;
@ -567,7 +578,7 @@ sub update_status {
syslog('err', "lxc cpuset rebalance error: $err") if $err;
eval {
update_storage_status($status_cfg);
update_storage_status($status_cfg, $pull_txn);
};
$err = $@;
syslog('err', "storage status update error: $err") if $err;
@ -601,6 +612,12 @@ sub update_status {
};
$err = $@;
syslog('err', "version info update error: $err") if $err;
eval {
PVE::PullMetric::transaction_finish($pull_txn);
};
$err = $@;
syslog('err', "could not populate metric data cache: $err") if $err;
}
my $next_update = 0;

View File

@ -89,7 +89,7 @@ sub _send_batch_size {
return $mtu - 50; # a bit more than 48byte to allow for safe room
}
# call with the smalles $data chunks possible
# call with the smallest $data chunks possible
sub add_metric_data {
my ($class, $txn, $data) = @_;
return if !defined($data);

View File

@ -27,6 +27,17 @@ use PVE::VZDump::Plugin;
use PVE::Tools qw(extract_param split_list);
use PVE::API2Tools;
# Standard option for backup job IDs. The permissive pattern (any run of
# non-whitespace, max 50 chars) needs to cover section config headers/IDs:
# UUIDs, user given values and `$digest:$counter` values converted from
# vzdump.cron.
# TODO: move to a better place once the module cycle
# Jobs::VZDump -> API2::VZDump -> API2::Backups -> Jobs::VZDump is broken..
PVE::JSONSchema::register_standard_option('pve-backup-jobid', {
type => 'string',
description => "The job ID.",
maxLength => 50,
pattern => '\S+',
});
my @posix_filesystems = qw(ext3 ext4 nfs nfs4 reiserfs xfs);
my $lockfile = '/var/run/vzdump.lock';
@ -483,6 +494,7 @@ sub send_notification {
my ($self, $tasklist, $total_time, $err, $detail_pre, $detail_post) = @_;
my $opts = $self->{opts};
my $job_id = $opts->{'job-id'};
my $mailto = $opts->{mailto};
my $cmdline = $self->{cmdline};
my $policy = $opts->{mailnotification} // 'always';
@ -516,10 +528,9 @@ sub send_notification {
"See Task History for details!\n";
};
my $hostname = get_hostname();
my $notification_props = {
"hostname" => $hostname,
# Hostname, might include domain part
"hostname" => get_hostname(),
"error-message" => $err,
"guest-table" => build_guest_table($tasklist),
"logs" => $text_log_part,
@ -529,12 +540,12 @@ sub send_notification {
};
my $fields = {
# TODO: There is no straight-forward way yet to get the
# backup job id here... (I think pvescheduler would need
# to pass that to the vzdump call?)
type => "vzdump",
hostname => $hostname,
# Hostname (without domain part)
hostname => PVE::INotify::nodename(),
};
# Add backup-job metadata field in case this is a backup job.
$fields->{'job-id'} = $job_id if $job_id;
my $severity = $failed ? "error" : "info";
my $email_configured = $mailto && scalar(@$mailto);
@ -1084,7 +1095,7 @@ sub exec_backup_task {
$task->{mode} = $mode;
debugmsg ('info', "backup mode: $mode", $logfd);
debugmsg ('info', "bandwidth limit: $opts->{bwlimit} KB/s", $logfd) if $opts->{bwlimit};
debugmsg ('info', "bandwidth limit: $opts->{bwlimit} KiB/s", $logfd) if $opts->{bwlimit};
debugmsg ('info', "ionice priority: $opts->{ionice}", $logfd);
if ($mode eq 'stop') {

View File

@ -5,17 +5,17 @@ Description: News displayed on the admin interface
<a href='https://www.proxmox.com' target='_blank'>www.proxmox.com</a>
Package: almalinux-9-default
Version: 20221108
Version: 20240911
Type: lxc
OS: almalinux
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/almalinux-9-default_20221108_amd64.tar.xz
md5sum: 03e6d335c14b96501bc39e7852da9772
sha512sum: 9b4561fad0de45943c0c46d9a075796533b0941c442cb70a5f9a323a601aba41f2e5e86d5e1600e5538b1470cf29f0cfd9f7f7c54ca9529cb9cbe234f2c6f440
Location: system/almalinux-9-default_20240911_amd64.tar.xz
md5sum: aec3769056165ed72a2523e6a2e0b12f
sha512sum: 4c3c5c2616b46fbb0b831a87669e6f323167c2f67cca487723ff3b00284309c547fec03d5e87678c3c6d306a764d9875fbcd01c46e1caf8203784c4957a907f0
Infopage: https://linuxcontainers.org
Description: LXC default image for almalinux 9 (20221108)
Description: LXC default image for almalinux 9 (20240911)
Package: alpine-3.18-default
Version: 20230607
@ -43,32 +43,45 @@ sha512sum: dec171b608802827a2b47ae6c473f71fdea5fae0064a7928749fc5a854a7220a3244e
Infopage: https://linuxcontainers.org
Description: LXC default image for alpine 3.19 (20240207)
Package: alpine-3.20-default
Version: 20240908
Type: lxc
OS: alpine
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/alpine-3.20-default_20240908_amd64.tar.xz
md5sum: ea04f7220f47522341a1ac7c4a728344
sha512sum: 8b675c373533c096ded4168edbf2fba2a6e3ed2d91a550b0d128e472d2a0d65a8f2e5e7dde772932d8d3fd9d93ca21b3e5e3b8a4c91bc479977e3f6ce1468ba9
Infopage: https://linuxcontainers.org
Description: LXC default image for alpine 3.20 (20240908)
Package: archlinux-base
Version: 20230608-1
Version: 20240911-1
Type: lxc
OS: archlinux
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/archlinux-base_20230608-1_amd64.tar.zst
md5sum: 581d33c1c8b71aa72df47d84285eef8e
sha512sum: 7f1ece6ded3d8571da8e0e20695e68391bec7585746632c6d555c8a8ecff3611fd87cf136c823d33d795e2c9adae9501190796a6d54c097c134afb965c97600f
Location: system/archlinux-base_20240911-1_amd64.tar.zst
md5sum: 2c137021b89cc583297eed26b10f46dc
sha512sum: 05b36399c801b774d9540ddbae6be6fc26803bc161e7888722f8f36c48569010e12392c6741bf263336b8542c59f18f67cf4f311d52b3b8dd58640efca765b85
Infopage: https://www.archlinux.org
Description: ArchLinux base image.
ArchLinux template with the 'base' group and the 'openssh' package installed.
Package: centos-9-stream-default
Version: 20221109
Version: 20240828
Type: lxc
OS: centos
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/centos-9-stream-default_20221109_amd64.tar.xz
md5sum: 13fccdcc2358b795ee613501eb88c850
sha512sum: 04bb902992f74edf2333d215837e9bb21258dfcdb7bf23bd659176641f6538aeb25bc44286c9caffb10ceb87288ce93668c9410f4a69b8a3b316e09032ead3a8
Location: system/centos-9-stream-default_20240828_amd64.tar.xz
md5sum: 159f3f7dd7e2d6188867e7fecb4b4bbd
sha512sum: de6ec0a6ffc2e980d4baa11185765ba925822462f14c387b8e50ffda0f2281e8fb8bebc53c84d39b8073dd543ea98496685c828ab713f2ec0ba56dd25e6ddbef
Infopage: https://linuxcontainers.org
Description: LXC default image for centos 9-stream (20221109)
Description: LXC default image for centos 9-stream (20240828)
Package: debian-11-standard
Version: 11.7-1
@ -85,45 +98,32 @@ Description: Debian 11 Bullseye (standard)
A small Debian Bullseye system including all standard packages.
Package: debian-12-standard
Version: 12.2-1
Version: 12.7-1
Type: lxc
OS: debian-12
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/debian-12-standard_12.2-1_amd64.tar.zst
md5sum: 0c40b2b49499c827bbf7db2d7a3efadc
sha512sum: 1846c5e64253256832c6f7b8780c5cb241abada3ab0913940b831bf8f7f869220277f5551f0abeb796852e448c178be22bd44eb1af8c0be3d5a13decf943398a
Location: system/debian-12-standard_12.7-1_amd64.tar.zst
md5sum: 0b6959720ebd506b5ddb2fbf8780ca6c
sha512sum: 39f6d06e082d6a418438483da4f76092ebd0370a91bad30b82ab6d0f442234d63fe27a15569895e34d6d1e5ca50319f62637f7fb96b98dbde4f6103cf05bff6d
Infopage: https://pve.proxmox.com/wiki/Linux_Container#pct_supported_distributions
Description: Debian 12 Bookworm (standard)
A small Debian Bullseye system including all standard packages.
Package: devuan-4.0-standard
Version: 4.0
Package: devuan-5.0-standard
Version: 5.0
Type: lxc
OS: devuan-4.0
OS: devuan-5.0
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/devuan-4.0-standard_4.0_amd64.tar.gz
md5sum: 515a89407afd96c3128125b427366aa0
sha512sum: af909d1c3af35a53d063bc273d7a9e7df08fb5a914432d7fba8cdd3271132cf8e7b2c53306d59ef01e53693081dd2127be2fb790d2ff5e98cb3e860f5d257d89
Location: system/devuan-5.0-standard_5.0_amd64.tar.gz
md5sum: 4e1aadd38db6f0ec628f7fa042cde11f
sha512sum: a70bf2f0851d1ad55bf0c92808e1543173daf30ed6e0af792bc0288dde9b15a841f4eb3ad4440fdb88333250cdcf8f01a6ed446ba9354c33fda68523bd88a9b5
Infopage: https://devuan.org
Description: Devuan 4.0 (standard)
A small Devuan Chimaera system including a minimal set of essential packages.
Package: fedora-38-default
Version: 20230607
Type: lxc
OS: fedora
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/fedora-38-default_20230607_amd64.tar.xz
md5sum: 45bbf8c641320aa91f2c8cf52a5280cc
sha512sum: 54328a3338ca9657d298a8a5d2ca15fe76f66fd407296d9e3e1c236ee60ea075d3406c175fdb46fe5c29e664224f2ce33bfe8cd1f634b7ea08d0183609d5e93c
Infopage: https://linuxcontainers.org
Description: LXC default image for fedora 38 (20230607)
Description: Devuan 5 (standard)
A small Devuan Daedalus system including a minimal set of essential packages.
Package: fedora-39-default
Version: 20231118
@ -138,6 +138,19 @@ sha512sum: 921cde6021e3c109e0d560b3e8ff4968c885fad8bcd8c2796d9ca733d362be8dd407d
Infopage: https://linuxcontainers.org
Description: LXC default image for fedora 39 (20231118)
Package: fedora-40-default
Version: 20240909
Type: lxc
OS: fedora
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/fedora-40-default_20240909_amd64.tar.xz
md5sum: c4fc793117e000edb6835c7c863c8683
sha512sum: 1d13bbebf18b01917c05e46e1d6d5ce4c8a8c015a510f9a526783e57e2b4821cd8f549abc8d080cba26377dfdb9094941e7a155acc4f5aced5ba3b682bb5b382
Infopage: https://linuxcontainers.org
Description: LXC default image for fedora 40 (20240909)
Package: gentoo-current-openrc
Version: 20231009
Type: lxc
@ -151,19 +164,6 @@ sha512sum: 40926859f0a777e8dfa7de10c76eb78ee92aa554e0598a09beec7f260ec7a02f19dfc
Infopage: https://linuxcontainers.org
Description: LXC openrc image for gentoo current (20231009)
Package: opensuse-15.4-default
Version: 20221109
Type: lxc
OS: opensuse
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/opensuse-15.4-default_20221109_amd64.tar.xz
md5sum: 1c66c3549b0684e788c17aa94c384262
sha512sum: 8089309652a0db23ddff826d1e343e79c6eccb7b615fb309e0a6f6f1983ea697aa94044a795f3cbe35156b1a1b2f60489eb20ecb54c786cec23c9fd89e0f29c5
Infopage: https://linuxcontainers.org
Description: LXC default image for opensuse 15.4 (20221109)
Package: opensuse-15.5-default
Version: 20231118
Type: lxc
@ -177,6 +177,19 @@ sha512sum: f3a6785c347da3867d074345b68db9c99ec2b269e454f715d234935014ca1dc9f7239
Infopage: https://linuxcontainers.org
Description: LXC default image for opensuse 15.5 (20231118)
Package: opensuse-15.6-default
Version: 20240910
Type: lxc
OS: opensuse
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/opensuse-15.6-default_20240910_amd64.tar.xz
md5sum: c1de1a7a5fad45ee90bc4d5c98f2ad3f
sha512sum: a0154162ef5128ebce176cab67d98b3d0c1db7bde92ac13e89b799de81a9b99f80d9f7afaf032a8247808a5d2d13261c5b7b9148fe03d60111666edfffc94523
Infopage: https://linuxcontainers.org
Description: LXC default image for opensuse 15.6 (20240910)
Package: proxmox-mail-gateway-8.1-standard
Version: 8.1-1
Type: lxc
@ -206,17 +219,17 @@ Description: Proxmox Mailgateway 7.3
A full featured mail proxy for spam and virus filtering, optimized for container environment.
Package: rockylinux-9-default
Version: 20221109
Version: 20240912
Type: lxc
OS: rockylinux
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/rockylinux-9-default_20221109_amd64.tar.xz
md5sum: e6aa40bb6a4e01c61fd27eb3da5446d1
sha512sum: ddc2a29ee66598d4c3a4224a0fa9868882e80bbabb7a20ae9f53431bb0ff73e73d4bd48b86bb0e9d1330e0af2c500f461ea5dc3c500ef722b472257acdc4ab41
Location: system/rockylinux-9-default_20240912_amd64.tar.xz
md5sum: 3a782d5d1c33d05eeff2e21579bdfae8
sha512sum: 864bcf3f21fd8616d3cd9e2330f7372ad7b29b070b746c4a4bd73f372db7cc5d06f3e7058b5571379e8f6c3c9d987a3ea3b43559c12f63c7ef096e9afa9ead46
Infopage: https://linuxcontainers.org
Description: LXC default image for rockylinux 9 (20221109)
Description: LXC default image for rockylinux 9 (20240912)
Package: ubuntu-20.04-standard
Version: 20.04-1
@ -246,34 +259,6 @@ Infopage: https://pve.proxmox.com/wiki/Linux_Container#pct_supported_distributio
Description: Ubuntu 22.04 Jammy (standard)
A small Ubuntu 22.04 Jammy Jellyfish system including all standard packages.
Package: ubuntu-23.04-standard
Version: 23.04-1
Type: lxc
OS: ubuntu-23.04
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/ubuntu-23.04-standard_23.04-1_amd64.tar.zst
md5sum: 5dee55750bd72e210be6603e3b87005b
sha512sum: 6c7d916cc76865d5984b6b41e3d45426071967059ecc0a5d10029d2706cca0ea96a3c4f4dfcede08e7a86f73b9dfbd3d07ac86ee380b0f12be6c35e486033249
Infopage: https://pve.proxmox.com/wiki/Linux_Container#pct_supported_distributions
Description: Ubuntu 23.04 Lunar (standard)
A small Ubuntu 23.04 Lunar Lobster system including all standard packages.
Package: ubuntu-23.10-standard
Version: 23.10-1
Type: lxc
OS: ubuntu-23.10
Section: system
Maintainer: Proxmox Support Team <support@proxmox.com>
Architecture: amd64
Location: system/ubuntu-23.10-standard_23.10-1_amd64.tar.zst
md5sum: 91b92c717f09d2172471b4c85a00aea3
sha512sum: 84bcb7348ba86026176ed35e5b798f89cb64b0bcbe27081e3f97e3002a6ec34d836bbc1e1b1ce5f327151d1bc79cf1d08edf5a9d2b21a60ffa91e96284e5a67f
Infopage: https://pve.proxmox.com/wiki/Linux_Container#pct_supported_distributions
Description: Ubuntu 23.10 Mantic (standard)
A small Ubuntu 23.10 Mantic Minotaur system including all standard packages.
Package: ubuntu-24.04-standard
Version: 24.04-2
Type: lxc

View File

@ -68,7 +68,7 @@ pve7to8.1:
printf ".TH PVE7TO8 1\n.SH NAME\npve7to8 \- Proxmox VE upgrade checker script for 7.4+ to current 8.x\n" > $@.tmp
printf ".SH DESCRIPTION\nThis tool will help you to detect common pitfalls and misconfguration\
before, and during the upgrade of a Proxmox VE system\n" >> $@.tmp
printf "Any failure must be addressed before the upgrade, and any waring must be addressed, \
printf "Any failure must be addressed before the upgrade, and any warning must be addressed, \
or at least carefully evaluated, if a false-positive is suspected\n" >> $@.tmp
printf ".SH SYNOPSIS\npve7to8 [--full]\n" >> $@.tmp
mv $@.tmp $@

View File

@ -17,3 +17,4 @@
#pigz: N
#notes-template: {{guestname}}
#pbs-change-detection-mode: legacy|data|metadata
#fleecing: enabled=BOOLEAN,storage=STORAGE_ID

181
debian/changelog vendored
View File

@ -1,3 +1,90 @@
pve-manager (8.2.7) bookworm; urgency=medium
* api: add missing schema annotations for download endpoints and adapt
existing ones to renaming the respective property to 'download_allowed'.
* pvesh: limit download handling to annotated endpoints to unify the
behaviour with pve-http-server.
-- Proxmox Support Team <support@proxmox.com> Mon, 23 Sep 2024 11:43:24 +0200
pve-manager (8.2.6) bookworm; urgency=medium
* fix #5731: vzdump jobs: fix execution of converted jobs
-- Proxmox Support Team <support@proxmox.com> Fri, 20 Sep 2024 17:47:17 +0200
pve-manager (8.2.5) bookworm; urgency=medium
* ui: qemu hardware: use background delay for asynchronous remove tasks so
that the task log window doesn't get shown if the task already finished.
* ui: qemu hardware: use asynchronous remove API call for disk hot-unplug to
expand the task duration timeout from 30 seconds to 10 minutes, helping
overloaded and/or slow systems.
* vzdump config: add fleecing property string to make it clear that fleecing
can be configured as a node-wide default too.
* ui: fix align mode of the two-column container widget, which is used for
the advanced tab of the backup job edit window.
* vzdump: fix unit for bandwidth limit in log message, it's in KiB (base 2)
not KB (base 10)
* pve7to8: allow one to run arbitrary newer Proxmox kernels after upgrade
* api: replication: add 'job-id' to notification metadata to allow users to
create notification match rules for specific replication jobs.
* vzdump: apt: notification: do not include domain in 'hostname' field.
* api: replication: include 'hostname' field for notifications.
* pve7to8: fix typo in log message.
* ui: backup job: clarify experimental change detection modes.
* ui: fix `maxcpu` validity check in the host CPU renderer to add a missing
space, making it consistent with the CPU usage column.
* ui: utils: fix inconsistency in host cpu usage display in search view,
adding a missing space between the number of CPUs and the actual label.
* ui: sdn ipam: fix editing custom mappings
* api: ceph mds: ensure the ID for a new MDS does not start with a number,
as Ceph doesn't supports that.
* fix #5570 ui: ceph: make the MDS ID fully configurable to avoid a issue
when the hostname, which is was used as base for the ID, starts with a
number. This also made the "extra ID" field redundant.
* fix #5010: ceph: pool set only properties that actually changed to avoid
false-positive errors.
* api: backup jobs: pass the job ID parameter to make it accessible for the
notification system.
* ui: backup jobs: allow one to set custom job ID in advanced settings
* ui: resource mappings: fix editing of mapping for non first node
* ui: sdn vnets: hide irrelevant fields depending on zone type
* ui: dc summary: fix calculation of storage size when one or more nodes are
offline
* api: metrics: add /cluster/metrics/export endpoint
* ui: qemu: hardware: fix permission check for enabling the button to add a
TPM-state volume
* update shipped appliance info index
-- Proxmox Support Team <support@proxmox.com> Thu, 12 Sep 2024 14:37:24 +0200
pve-manager (8.2.4) bookworm; urgency=medium
* pvestatd: clear trailing newlines
@ -584,7 +671,7 @@ pve-manager (8.0.0~9) bookworm; urgency=medium
* ui: create VM: use new x86-64-v2-AES as new default vCPU type.
The x86-64-v2-aes is compatible with Intel Westmere, launched in 2010 and
Opteron 6200-series "Interlagos" launched in 2011.
This model provides a few important extra fetures over the qemu64/kvm64
This model provides a few important extra features over the qemu64/kvm64
model (which are basically v1 minus the -vme,-cx16 CPU flags) like SSE4.1
and additionally also AES, while not supported by all v2 models it is by
all recent ones, improving performance of many computing operations
@ -689,7 +776,7 @@ pve-manager (8.0.0~7) bookworm; urgency=medium
pve-manager (8.0.0~6) bookworm; urgency=medium
* vzdump: prepare to support 'exclude-path' as arry (multiple lines in the
* vzdump: prepare to support 'exclude-path' as array (multiple lines in the
config)
-- Proxmox Support Team <support@proxmox.com> Mon, 05 Jun 2023 10:05:00 +0200
@ -769,7 +856,7 @@ pve-manager (7.4-2) bullseye; urgency=medium
* ui: lvm-thin: fix not being able to edit the storage
* replication: fix uninitalized warning
* replication: fix uninitialized warning
* api: apt: versions: also list Proxmox libraries for interfacing with
Rust
@ -1002,7 +1089,7 @@ pve-manager (7.2-15) bullseye; urgency=medium
keypresses
* ui: tags: implement dirty tracking for inline editing and highlight finish
inline editing button if there are uncomitted changes
inline editing button if there are uncommitted changes
* ui: datacenter options: add basic editor for Cluster Resource Scheduling
setting
@ -1334,7 +1421,7 @@ pve-manager (7.1-13) bullseye; urgency=medium
to a slower frequency to better convey that there may be an acute failing,
not just a flapping error like one mail could suggest.
* replication: include more info in mail like failure-count, last succesful
* replication: include more info in mail like failure-count, last successful
sync, next try and current target node.
* ui: sdn: zone evpn: delete exitnodes-primary if empty
@ -1474,7 +1561,7 @@ pve-manager (7.1-8) bullseye; urgency=medium
* ui: upload/download image: trim whitespace from checksum value
* ui: calendar event: add an once-daily example and clarify workday at 00:00
one slighlty
one slightly
* ui: container: allow one to set the 'lazytime' mount option
@ -1554,7 +1641,7 @@ pve-manager (7.1-1) bullseye; urgency=medium
* api/services: add pvescheduler to the service list
* ui: ceph: add more compatibilty code for versions
* ui: ceph: add more compatibility code for versions
* ui: backup: show retention settings in own column
@ -1609,7 +1696,7 @@ pve-manager (7.0-15) bullseye; urgency=medium
* ui: dc/options: display WebAuthn and add editor
* api: ceph-mds: get mds state when multple ceph filesystems exist
* api: ceph-mds: get mds state when multiple ceph filesystems exist
* ceph fs: support creating multiple CephFS via UI/API
@ -1690,7 +1777,7 @@ pve-manager (7.0-12) bullseye; urgency=medium
* ui: backup job detail view: merge mail notification when/to fields
* ui: improve ACME edit/apply button visibilty
* ui: improve ACME edit/apply button visibility
* fix #3620: ui: dc/storage: delete empty pbs fingerprint on edit
@ -1728,7 +1815,7 @@ pve-manager (7.0-11) bullseye; urgency=medium
* ui: node/ZFS: add zstd to possible compression methods
* vzdump: use storage plugins to determine backup feasability
* vzdump: use storage plugins to determine backup feasibility
* ui: lxc/CreateWizard: add a 'nesting' checkbox and enable it by default
@ -1757,7 +1844,7 @@ pve-manager (7.0-10) bullseye; urgency=medium
pve-manager (7.0-9) bullseye; urgency=medium
* pve6to7: sync new checks and adaptions from stable-6 branch for consitency
* pve6to7: sync new checks and adaptions from stable-6 branch for consistency
* fix #3490: do not filter out 'Generic System Peripheral' PCI(e) devices by
default
@ -1787,7 +1874,7 @@ pve-manager (7.0-8) bullseye; urgency=medium
* pveceph: also install nvme-cli
* ui: dc/guests: change layouting to a flexbox, fixing render issues in
Firefox on somes systems
Firefox on some systems
* pve6to7: sync new checks from stable-6 branch
@ -1882,7 +1969,7 @@ pve-manager (7.0-3) bullseye; urgency=medium
snapshots got pruned
* node config: limit description/comment length to 64 KiB and check in
pve6to7 for overly long exisitng notes.
pve6to7 for overly long existing notes.
* ui: fix unlimited height of VM/CT Summary notes-view, noticeable on long text
@ -1912,7 +1999,7 @@ pve-manager (7.0-1) bullseye; urgency=medium
* re-build for Proxmox VE 7 based on Debian Bullseye
* d/control: order ifupdown2 as first, prefered, dependency
* d/control: order ifupdown2 as first, preferred, dependency
* enable installation of ceph 16.2 pacific release
@ -1939,7 +2026,7 @@ pve-manager (6.4-6) pve; urgency=medium
the IPv6 network stack system-wide (which should be done by setting the
`net.ipv6.conf.all.disable_ipv6` sysctl, not over the kernel command line)
* ui: storage status: use SI (base ten) units for usage for consitency with
* ui: storage status: use SI (base ten) units for usage for consistency with
RRD chart
-- Proxmox Support Team <support@proxmox.com> Fri, 07 May 2021 18:17:25 +0200
@ -2010,7 +2097,7 @@ pve-manager (6.4-1) pve; urgency=medium
* fix #417: proxy: allow one to configure a address where the pveproxy and
spiceproxy should listen and default to listening on both, IPv6 and IPv4.
This changes the log format for IPv4 addresses, as ::ffff: will be prefixed.
Either adpat any matching software (for example, fail2ban) or disable this
Either adapt any matching software (for example, fail2ban) or disable this
by setting the LISTEN_IP to 0.0.0.0, if this is not wanted.
* ui: safe destroy guest: add checkbox for removal of unreferenced disks
@ -2092,7 +2179,7 @@ pve-manager (6.3-5) pve; urgency=medium
pve-manager (6.3-4) pve; urgency=medium
* pveceph: improve useability of 'status' command
* pveceph: improve usability of 'status' command
* fix #3203: ui: VM hardware: use correct OS default model for network device
type
@ -2256,7 +2343,7 @@ pve-manager (6.2-13) pve; urgency=medium
* partially fix #3056: namespace vzdump temporary directory with VMID
* ceph: allow to alter settings of an exisiting pool, only over API/CLI for
* ceph: allow to alter settings of an existing pool, only over API/CLI for
now
* ceph: add option to control placement-group autoscale mode on pool create
@ -2397,7 +2484,7 @@ pve-manager (6.2-6) pve; urgency=medium
pve-manager (6.2-5) pve; urgency=medium
* improve ACME interaces
* improve ACME interfaces
* fix #2747: ui: API token create: always reset base submit URL
@ -2606,7 +2693,7 @@ pve-manager (6.1-4) pve; urgency=medium
pve-manager (6.1-3) pve; urgency=medium
* api: apt/versions: improve stabillity under edge cases
* api: apt/versions: improve stability under edge cases
* improve pveversion for removed but not yet purged packages
@ -2618,7 +2705,7 @@ pve-manager (6.1-2) pve; urgency=medium
* ui: dc/options: add onlineHelp to u2f and ha edit window
* LXC: Disable resize button when volume is unusued
* LXC: Disable resize button when volume is unused
* ui: dc/options: fix capabillities for u2f, bwlimits, ha settings
@ -2797,7 +2884,7 @@ pve-manager (6.0-7) pve; urgency=medium
* fix #2340: gui: ceph: handle 'null' versions for hosts
* fix #2341: ceph: osd create: allow db/wal on partioned disks
* fix #2341: ceph: osd create: allow db/wal on partitioned disks
* ui: allow one to add the AES flag explicitly to VMs
@ -2976,7 +3063,7 @@ pve-manager (6.0-0+3) pve; urgency=medium
* VM migrate: do some initial checks before a migrate is triggered to notify
users about problems earlier
* ceph: init: update inital config for nautilus
* ceph: init: update initial config for nautilus
* ceph: improve monitor creation and adapt for Nautilus
@ -3189,7 +3276,7 @@ pve-manager (5.3-10) unstable; urgency=medium
* fix #2096: enable resume in context menu for guest triggered sleep
* default to unpriviledged CT in the GUI
* default to unprivileged CT in the GUI
* fix #1769: restart mode for bulk lxc migration
@ -3332,7 +3419,7 @@ pve-manager (5.2-11) unstable; urgency=medium
* Update default CIPHERS to a more current list
* close #584: ui qemu: changed remove unused disk to asynchron call
* close #584: ui qemu: changed remove unused disk to asynchronous call
* dc/user: render user fullnames htmlEncoded
@ -3652,7 +3739,7 @@ pve-manager (5.1-46) stable; urgency=medium
* fix #1635: correctly change maxIds in ControllerSelector in wizard
* enable/disable xtermjs depending on the existance of a serial port
* enable/disable xtermjs depending on the existence of a serial port
* show serial ports on the gui
@ -4084,7 +4171,7 @@ pve-manager (5.0-12) unstable; urgency=medium
* translate params in bulk migrate window
* change to debians font-awesome package
* change to debian's font-awesome package
* mark node shutdown/reboot buttons as dangerous
@ -4326,7 +4413,7 @@ pve-manager (4.4-9) unstable; urgency=medium
* use new PVE::Storage::check_volume_access()
* pvestatd.pm: corretly use new RPCEnvironment
* pvestatd.pm: correctly use new RPCEnvironment
* pveproxy: do not expose /pve2/js/ (no longer required)
@ -4394,7 +4481,7 @@ pve-manager (4.4-6) unstable; urgency=medium
* remove vznet.conf
* Reload the Ceph OSDTree afer adding an OSD
* Reload the Ceph OSDTree after adding an OSD
* fix #1230: add blocksize for ZFSPoolPlugin
@ -4408,7 +4495,7 @@ pve-manager (4.4-6) unstable; urgency=medium
pve-manager (4.4-5) unstable; urgency=medium
* add unpriviledged flag to container restore window
* add unprivileged flag to container restore window
-- Proxmox Support Team <support@proxmox.com> Tue, 27 Dec 2016 11:29:31 +0100
@ -4561,7 +4648,7 @@ pve-manager (4.3-11) unstable; urgency=medium
* API2Client: allow to set ssl options
* fix #1196: make restriced/nofailback boolean in gui
* fix #1196: make restricted/nofailback boolean in gui
* disable rules copy button if not 'in' or 'out' rule
@ -4859,7 +4946,7 @@ pve-manager (4.2-17) unstable; urgency=medium
* Add some extra debug information to the report.
* add ca-certificates as required depency
* add ca-certificates as required dependency
* lxc/status: show disk usage on running containers
@ -4917,7 +5004,7 @@ pve-manager (4.2-13) unstable; urgency=medium
pve-manager (4.2-12) unstable; urgency=medium
* Rename the SCSI VirtIO controller to better differenciate from VirtIO blk
* Rename the SCSI VirtIO controller to better differentiate from VirtIO blk
* fix #778: Case fix for VMware
@ -5064,7 +5151,7 @@ pve-manager (4.1-33) unstable; urgency=medium
pve-manager (4.1-32) unstable; urgency=medium
* GUI: enhace visibillity of dialog window border
* GUI: enhance visibility of dialog window border
* fix #143: add disk/mem percent sort
@ -5369,7 +5456,7 @@ pve-manager (4.0-63) unstable; urgency=medium
* pvestatd: use 'priv' environment for service
* improve log messages at sytem shutdown
* improve log messages at system shutdown
* pveproxy.service: add Wants=ssh.service
@ -5391,11 +5478,11 @@ pve-manager (4.0-61) unstable; urgency=medium
* ceph.conf : remove auth_supported
* Allow email adresses with a top level domain of up to 63 characters
* Allow email addresses with a top level domain of up to 63 characters
* fix bug #575: fork at shutdown, so that partent starts new worker
* update build infrastucture to be able to develop with Ext6
* update build infrastructure to be able to develop with Ext6
-- Proxmox Support Team <support@proxmox.com> Fri, 20 Nov 2015 10:05:01 +0100
@ -5463,7 +5550,7 @@ pve-manager (4.0-51) unstable; urgency=medium
pve-manager (4.0-50) unstable; urgency=medium
* fix bug #756: force macro setting to emtpy string when
* fix bug #756: force macro setting to empty string when
user erases the content
-- Proxmox Support Team <support@proxmox.com> Thu, 15 Oct 2015 17:41:02 +0200
@ -5540,7 +5627,7 @@ pve-manager (4.0-39) unstable; urgency=medium
* update sencha touch sources to 2.4.2
* Allow email adresses with a top level domain of up to 63 characters
* Allow email addresses with a top level domain of up to 63 characters
* Deny stop mode backups from HA managed and enabled services
@ -5947,7 +6034,7 @@ pve-manager (3.3-12) unstable; urgency=low
pve-manager (3.3-11) unstable; urgency=low
* New 'disconnect' button on the network edit pannel.
* New 'disconnect' button on the network edit panel.
* Fix backup failure at shutdown (stop backup on host shutdown)
@ -5997,7 +6084,7 @@ pve-manager (3.3-5) unstable; urgency=low
pve-manager (3.3-4) unstable; urgency=low
* pveproxy: limit ourselfes to tlsv1
* pveproxy: limit ourselves to tlsv1
-- Proxmox Support Team <support@proxmox.com> Wed, 15 Oct 2014 15:49:17 +0200
@ -6486,7 +6573,7 @@ pve-manager (3.1-14) unstable; urgency=low
pve-manager (3.1-13) unstable; urgency=low
* dissable SSL compression (avoid TLS CRIME vulnerability)
* disable SSL compression (avoid TLS CRIME vulnerability)
-- Proxmox Support Team <support@proxmox.com> Tue, 17 Sep 2013 07:17:17 +0200
@ -6703,7 +6790,7 @@ pve-manager (3.0-23) unstable; urgency=low
* fix bug #401: disable connection timeout during API call processing
* add suport for new qemu-server async configuration API
* add support for new qemu-server async configuration API
-- Proxmox Support Team <support@proxmox.com> Fri, 07 Jun 2013 11:52:15 +0200
@ -7049,7 +7136,7 @@ pve-manager (2.2-19) unstable; urgency=low
pve-manager (2.2-18) unstable; urgency=low
* add workaroud to correctly show scrollbars
* add workaround to correctly show scrollbars
-- Proxmox Support Team <support@proxmox.com> Wed, 26 Sep 2012 11:22:16 +0200
@ -7522,7 +7609,7 @@ pve-manager (2.0-14) unstable; urgency=low
pve-manager (2.0-13) unstable; urgency=low
* implemente openvz migration ('pvectl migrate')
* implement openvz migration ('pvectl migrate')
-- Proxmox Support Team <support@proxmox.com> Fri, 02 Dec 2011 10:37:28 +0100
@ -7560,7 +7647,7 @@ pve-manager (2.0-8) unstable; urgency=low
pve-manager (2.0-7) unstable; urgency=low
* imlement openvz GUI
* implement openvz GUI
-- Proxmox Support Team <support@proxmox.com> Fri, 07 Oct 2011 13:56:03 +0200
@ -7642,7 +7729,7 @@ pve-manager (1.5-3) unstable; urgency=low
pve-manager (1.5-2) unstable; urgency=low
* add CPUs settings for OpenVZ (work now withe kernel 2.6.18)
* add CPUs settings for OpenVZ (work now with kernel 2.6.18)
-- Proxmox Support Team <support@proxmox.com> Tue, 12 Jan 2010 09:42:08 +0100

18
debian/control vendored
View File

@ -7,20 +7,20 @@ Build-Depends: debhelper-compat (= 13),
libhttp-daemon-perl,
libpod-parser-perl,
libproxmox-acme-perl,
libproxmox-rs-perl (>= 0.2.0),
libproxmox-rs-perl (>= 0.3.4),
libpve-access-control (>= 8.0.7),
libpve-cluster-api-perl,
libpve-cluster-perl (>= 6.1-6),
libpve-common-perl (>= 8.1.2),
libpve-common-perl (>= 8.2.3),
libpve-guest-common-perl (>= 5.1.1),
libpve-http-server-perl (>= 2.0-12),
libpve-notify-perl,
libpve-rs-perl (>= 0.7.1),
libpve-rs-perl (>= 0.8.10),
libpve-storage-perl (>= 6.3-2),
libtemplate-perl,
libtest-mockmodule-perl,
lintian,
proxmox-widget-toolkit (>= 4.0.7),
proxmox-widget-toolkit (>= 4.1.4),
pve-cluster,
pve-container,
pve-doc-generator (>= 8.0.5),
@ -56,15 +56,15 @@ Depends: apt (>= 1.5~),
libnet-dns-perl,
libproxmox-acme-perl,
libproxmox-acme-plugins,
libproxmox-rs-perl (>= 0.2.0),
libproxmox-rs-perl (>= 0.3.4),
libpve-access-control (>= 8.1.3),
libpve-cluster-api-perl (>= 7.0-5),
libpve-cluster-perl (>= 7.2-3),
libpve-common-perl (>= 8.2.0),
libpve-guest-common-perl (>= 5.1.0),
libpve-http-server-perl (>= 4.1-1),
libpve-common-perl (>= 8.2.3),
libpve-guest-common-perl (>= 5.1.4),
libpve-http-server-perl (>= 5.1.1),
libpve-notify-perl (>= 8.0.5),
libpve-rs-perl (>= 0.7.1),
libpve-rs-perl (>= 0.8.10),
libpve-storage-perl (>= 8.1.5),
librados2-perl (>= 1.3-1),
libtemplate-perl,

6
debian/copyright vendored
View File

@ -26,10 +26,10 @@ http://www.sencha.com/license
Open Source License
----------------------------------------------------------------------------
This version of Ext JS is licensed under the terms of the
Open Source GPL 3.0 license.
This version of Ext JS is licensed under the terms of the
Open Source GPL 3.0 license.
http://www.gnu.org/licenses/gpl.html (/usr/share/common-licenses/GPL-3)
http://www.gnu.org/licenses/gpl.html (/usr/share/common-licenses/GPL-3)
There are several FLOSS exceptions available for use with this release for
open source applications that are distributed under a license other than GPL.

View File

@ -65,7 +65,7 @@ Ext.define('PVE.Parser', {
if (Ext.isDefined(res[defaultKey])) {
throw 'defaultKey may be only defined once in propertyString';
}
res[defaultKey] = k; // k ist the value in this case
res[defaultKey] = k; // k is the value in this case
} else {
throw `Failed to parse key-value pair: ${property}`;
}

View File

@ -118,7 +118,8 @@ Ext.define('PVE.Utils', {
}
if (service.ceph_version) {
var match = service.ceph_version.match(/version (\d+(\.\d+)*)/);
// See PVE/Ceph/Tools.pm - get_local_version
const match = service.ceph_version.match(/^ceph.*\sv?(\d+(?:\.\d+)+)/);
if (match) {
return match[1];
}
@ -1077,13 +1078,14 @@ Ext.define('PVE.Utils', {
}
var maxcpu = node.data.maxcpu || 1;
if (!Ext.isNumeric(maxcpu) && (maxcpu >= 1)) {
if (!Ext.isNumeric(maxcpu) || maxcpu < 1) {
return '';
}
var per = (record.data.cpu/maxcpu) * record.data.maxcpu * 100;
const cpu_label = maxcpu > 1 ? 'CPUs' : 'CPU';
return per.toFixed(1) + '% of ' + maxcpu.toString() + (maxcpu > 1 ? 'CPUs' : 'CPU');
return `${per.toFixed(1)}% of ${maxcpu} ${cpu_label}`;
},
render_bandwidth: function(value) {
@ -2063,6 +2065,17 @@ Ext.define('PVE.Utils', {
zfscreate: [gettext('ZFS Storage'), gettext('Create')],
zfsremove: ['ZFS Pool', gettext('Remove')],
});
Proxmox.Utils.overrideNotificationFieldName({
'job-id': gettext('Job ID'),
});
Proxmox.Utils.overrideNotificationFieldValue({
'package-updates': gettext('Package updates are available'),
'vzdump': gettext('Backup notifications'),
'replication': gettext('Replication job notifications'),
'fencing': gettext('Node fencing notifications'),
});
},
});

View File

@ -13,18 +13,17 @@ Ext.define('PVE.CephCreateService', {
me.nodename = node;
me.updateUrl();
},
setExtraID: function(extraID) {
setServiceID: function(value) {
let me = this;
me.extraID = me.type === 'mds' ? `-${extraID}` : '';
me.serviceID = value;
me.updateUrl();
},
updateUrl: function() {
let me = this;
let extraID = me.extraID ?? '';
let node = me.nodename;
let serviceID = me.serviceID ?? me.nodename;
me.url = `/nodes/${node}/ceph/${me.type}/${node}${extraID}`;
me.url = `/nodes/${node}/ceph/${me.type}/${serviceID}`;
},
defaults: {
@ -40,17 +39,19 @@ Ext.define('PVE.CephCreateService', {
listeners: {
change: function(f, value) {
let view = this.up('pveCephCreateService');
view.lookup('mds-id').setValue(value);
view.setNode(value);
},
},
},
{
xtype: 'textfield',
fieldLabel: gettext('Extra ID'),
regex: /[a-zA-Z0-9]+/,
regexText: gettext('ID may only consist of alphanumeric characters'),
reference: 'mds-id',
fieldLabel: gettext('MDS ID'),
regex: /^([a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?)$/,
regexText: gettext('ID may consist of alphanumeric characters and hyphen. It cannot start with a number or end in a hyphen.'),
submitValue: false,
emptyText: Proxmox.Utils.NoneText,
allowBlank: false,
cbind: {
disabled: get => get('type') !== 'mds',
hidden: get => get('type') !== 'mds',
@ -58,7 +59,7 @@ Ext.define('PVE.CephCreateService', {
listeners: {
change: function(f, value) {
let view = this.up('pveCephCreateService');
view.setExtraID(value);
view.setServiceID(value);
},
},
},
@ -73,7 +74,7 @@ Ext.define('PVE.CephCreateService', {
cbind: {
hidden: get => get('type') !== 'mds',
},
html: gettext('The Extra ID allows creating multiple MDS per node, which increases redundancy with more than one CephFS.'),
html: gettext('By using different IDs, you can have multiple MDS per node, which increases redundancy with more than one CephFS.'),
},
],

View File

@ -265,7 +265,7 @@ Ext.define('PVE.ceph.ServiceList', {
});
me.ids.push(list[i].id);
} else {
delete pendingRemoval[list[i].id]; // drop exisiting from for-removal
delete pendingRemoval[list[i].id]; // drop existing from for-removal
}
service.updateService(list[i].title, list[i].text, list[i].health);
}

View File

@ -10,7 +10,7 @@ Ext.define('PVE.container.TwoColumnContainer', {
layout: {
type: 'hbox',
align: 'stretch',
align: 'begin',
},
// The default ratio of the start widget. It an be an integer or a floating point number

View File

@ -124,7 +124,7 @@ Ext.define('PVE.dc.ACMEPluginEditor', {
datafield.setValue(extradata.join('\n'));
if (!me.createdInitially) {
datafield.resetOriginalValue();
me.createdInitially = true; // save that we initally set that
me.createdInitially = true; // save that we initially set that
}
},
onGetValues: function(values) {

View File

@ -45,10 +45,6 @@ Ext.define('PVE.dc.BackupEdit', {
Proxmox.Utils.assemble_field_data(values, { 'delete': 'notification-target' });
}
if (!values.id && isCreate) {
values.id = 'backup-' + Ext.data.identifier.Uuid.Global.generate().slice(0, 13);
}
let selMode = values.selMode;
delete values.selMode;

View File

@ -238,7 +238,7 @@ Ext.define('PVE.dc.BackupInfo', {
const modeToDisplay = {
snapshot: gettext('Snapshot'),
stop: gettext('Stop'),
suspend: gettext('Snapshot'),
suspend: gettext('Suspend'),
};
return modeToDisplay[value] ?? gettext('Unknown');
},

View File

@ -151,6 +151,11 @@ Ext.define('PVE.dc.Summary', {
} else if (countedStorage[sid]) {
break;
}
if (data.status === "unknown") {
break;
}
used += data.disk;
total += data.maxdisk;
countedStorage[sid] = true;

View File

@ -37,7 +37,7 @@ Ext.define('PVE.form.SizeField', {
unit: 'MiB',
unitPostfix: '',
// use this if the backend saves values in another unit tha bytes, e.g.,
// use this if the backend saves values in a unit other than bytes, e.g.,
// for KiB set it to 'KiB'
backendUnit: undefined,

View File

@ -40,7 +40,7 @@ Ext.define('PVE.form.SDNZoneSelector', {
}, function() {
Ext.define('pve-sdn-zone', {
extend: 'Ext.data.Model',
fields: ['zone'],
fields: ['zone', 'type'],
proxy: {
type: 'proxmox',
url: "/api2/json/cluster/sdn/zones",

View File

@ -43,7 +43,7 @@ Ext.define('PVE.lxc.CPUEdit', {
},
});
// The view model of the parent shoul contain a 'cgroupMode' variable (or params for v2 are used).
// The view model of the parent should contain a 'cgroupMode' variable (or params for v2 are used).
Ext.define('PVE.lxc.CPUInputPanel', {
extend: 'Proxmox.panel.InputPanel',
alias: 'widget.pveLxcCPUInputPanel',

View File

@ -37,6 +37,10 @@ Ext.define('PVE.panel.BackupAdvancedOptions', {
return {};
}
if (!formValues.id && me.isCreate) {
formValues.id = 'backup-' + Ext.data.identifier.Uuid.Global.generate().slice(0, 13);
}
let options = {};
if (!me.isCreate) {
@ -108,6 +112,25 @@ Ext.define('PVE.panel.BackupAdvancedOptions', {
},
items: [
{
xtype: 'pveTwoColumnContainer',
startColumn: {
xtype: 'pmxDisplayEditField',
vtype: 'ConfigId',
fieldLabel: gettext('Job ID'),
emptyText: gettext('Autogenerate'),
name: 'id',
allowBlank: true,
cbind: {
editable: '{isCreate}',
},
},
endFlex: 2,
endColumn: {
xtype: 'displayfield',
value: gettext('Can be used in notification matchers to match this job.'),
},
},
{
xtype: 'pveTwoColumnContainer',
startColumn: {
@ -249,14 +272,14 @@ Ext.define('PVE.panel.BackupAdvancedOptions', {
value: '__default__',
comboItems: [
['__default__', "Default"],
['data', "Data"],
['metadata', "Metadata"],
['data', "Data (experimental)"],
['metadata', "Metadata (experimental)"],
],
},
endFlex: 2,
endColumn: {
xtype: 'displayfield',
value: gettext("EXPERIMENTAL: Mode to detect file changes and archive encoding format for container backups."),
value: gettext("Mode to detect file changes and switch archive encoding format for container backups (NOTE: `data` and `metadata` modes are experimental)."),
},
},
{

View File

@ -486,17 +486,19 @@ Ext.define('PVE.qemu.HardwareView', {
return msg;
},
handler: function(btn, e, rec) {
let params = { 'delete': rec.data.key };
if (btn.RESTMethod === 'POST') {
params.background_delay = 5;
}
Proxmox.Utils.API2Request({
url: '/api2/extjs/' + baseurl,
waitMsgTarget: me,
method: btn.RESTMethod,
params: {
'delete': rec.data.key,
},
params: params,
callback: () => me.reload(),
failure: response => Ext.Msg.alert('Error', response.htmlStatus),
success: function(response, options) {
if (btn.RESTMethod === 'POST') {
if (btn.RESTMethod === 'POST' && response.result.data !== null) {
Ext.create('Proxmox.window.TaskProgress', {
autoShow: true,
upid: response.result.data,
@ -591,7 +593,7 @@ Ext.define('PVE.qemu.HardwareView', {
me.down('#addNet').setDisabled(noVMConfigNetPerm || isAtLimit('net'));
me.down('#addRng').setDisabled(noSysConsolePerm || isAtLimit('rng'));
efidisk_menuitem.setDisabled(noVMConfigDiskPerm || isAtLimit('efidisk'));
me.down('#addTpmState').setDisabled(noSysConsolePerm || isAtLimit('tpmstate'));
me.down('#addTpmState').setDisabled(noVMConfigDiskPerm || isAtLimit('tpmstate'));
me.down('#addCloudinitDrive').setDisabled(noVMConfigCDROMPerm || noVMConfigCloudinitPerm || hasCloudInit);
if (!rec) {
@ -606,6 +608,7 @@ Ext.define('PVE.qemu.HardwareView', {
const deleted = !!rec.data.delete;
const pending = deleted || me.hasPendingChanges(key);
const isRunning = me.pveSelNode.data.running;
const isCloudInit = isCloudInitKey(value);
const isCDRom = value && !!value.toString().match(/media=cdrom/);
@ -614,7 +617,7 @@ Ext.define('PVE.qemu.HardwareView', {
const isUsedDisk = !isUnusedDisk && row.isOnStorageBus && !isCDRom;
const isDisk = isUnusedDisk || isUsedDisk;
const isEfi = key === 'efidisk0';
const tpmMoveable = key === 'tpmstate0' && !me.pveSelNode.data.running;
const tpmMoveable = key === 'tpmstate0' && !isRunning;
let cannotDelete = deleted || row.never_delete;
cannotDelete ||= isCDRom && !cdromCap;
@ -623,7 +626,7 @@ Ext.define('PVE.qemu.HardwareView', {
remove_btn.setDisabled(cannotDelete);
remove_btn.setText(isUsedDisk && !isCloudInit ? remove_btn.altText : remove_btn.defaultText);
remove_btn.RESTMethod = isUnusedDisk ? 'POST':'PUT';
remove_btn.RESTMethod = isUnusedDisk || (isDisk && isRunning) ? 'POST' : 'PUT';
edit_btn.setDisabled(
deleted || !row.editor || isCloudInit || (isCDRom && !cdromCap) || (isDisk && !diskCap));

View File

@ -61,7 +61,7 @@ Ext.define('PVE.qemu.OSDefaults', {
networkCard: 'virtio',
});
// recommandation from http://wiki.qemu.org/Windows2000
// recommendation from http://wiki.qemu.org/Windows2000
addOS({
pveOS: 'w2k',
parent: 'generic',

View File

@ -180,7 +180,7 @@ Ext.define('PVE.qemu.PCIInputPanel', {
columnWidth: 1,
padding: '0 0 10 0',
itemId: 'iommuwarning',
value: 'The selected Device is not in a seperate IOMMU group, make sure this is intended.',
value: 'The selected Device is not in a separate IOMMU group, make sure this is intended.',
userCls: 'pmx-hint',
},
];

View File

@ -1,4 +1,4 @@
// The view model of the parent shoul contain a 'cgroupMode' variable (or params for v2 are used).
// The view model of the parent should contain a 'cgroupMode' variable (or params for v2 are used).
Ext.define('PVE.qemu.ProcessorInputPanel', {
extend: 'Proxmox.panel.InputPanel',
alias: 'widget.pveQemuProcessorPanel',

View File

@ -12,6 +12,13 @@ Ext.define('PVE.sdn.VnetInputPanel', {
return values;
},
initComponent: function() {
let me = this;
me.callParent();
me.setZoneType(undefined);
},
items: [
{
xtype: 'pmxDisplayEditField',
@ -40,9 +47,21 @@ Ext.define('PVE.sdn.VnetInputPanel', {
name: 'zone',
value: '',
allowBlank: false,
listeners: {
change: function() {
let me = this;
let record = me.findRecordByValue(me.value);
let zoneType = record?.data?.type;
let panel = me.up('panel');
panel.setZoneType(zoneType);
},
},
},
{
xtype: 'proxmoxintegerfield',
itemId: 'sdnVnetTagField',
name: 'tag',
minValue: 1,
maxValue: 16777216,
@ -54,6 +73,7 @@ Ext.define('PVE.sdn.VnetInputPanel', {
},
{
xtype: 'proxmoxcheckbox',
itemId: 'sdnVnetVlanAwareField',
name: 'vlanaware',
uncheckedValue: null,
checked: false,
@ -63,6 +83,26 @@ Ext.define('PVE.sdn.VnetInputPanel', {
},
},
],
setZoneType: function(zoneType) {
let me = this;
let tagField = me.down('#sdnVnetTagField');
if (!zoneType || zoneType === 'simple') {
tagField.setVisible(false);
tagField.setValue('');
} else {
tagField.setVisible(true);
}
let vlanField = me.down('#sdnVnetVlanAwareField');
if (!zoneType || zoneType === 'evpn') {
vlanField.setVisible(false);
vlanField.setValue('');
} else {
vlanField.setVisible(true);
}
},
});
Ext.define('PVE.sdn.VnetEdit', {

View File

@ -156,15 +156,20 @@ Ext.define('PVE.sdn.DhcpTree', {
openEditWindow: function(data) {
let me = this;
let extraRequestParams = {
mac: data.mac,
zone: data.zone,
vnet: data.vnet,
};
if (data.vmid) {
extraRequestParams.vmid = data.vmid;
}
Ext.create('PVE.sdn.IpamEdit', {
autoShow: true,
mapping: data,
extraRequestParams: {
vmid: data.vmid,
mac: data.mac,
zone: data.zone,
vnet: data.vnet,
},
extraRequestParams,
listeners: {
destroy: () => me.reload(),
},

View File

@ -66,12 +66,12 @@ Ext.define('PVE.ceph.Install', {
},
handler: function() {
let view = this.up('pveCephInstallWindow');
let wizzard = Ext.create('PVE.ceph.CephInstallWizard', {
let wizard = Ext.create('PVE.ceph.CephInstallWizard', {
nodename: view.nodename,
});
wizzard.getViewModel().set('isInstalled', this.getViewModel().get('isInstalled'));
wizzard.show();
view.mon(wizzard, 'beforeClose', function() {
wizard.getViewModel().set('isInstalled', this.getViewModel().get('isInstalled'));
wizard.show();
view.mon(wizard, 'beforeClose', function() {
view.fireEvent("cephInstallWindowClosed");
view.close();
});

View File

@ -929,7 +929,7 @@ Ext.define('PVE.window.GuestImport', {
let renderWarning = w => {
const warningsCatalogue = {
'cdrom-image-ignored': gettext("CD-ROM images cannot get imported, if required you can reconfigure the '{0}' drive in the 'Advanced' tab."),
'nvme-unsupported': gettext("NVMe disks are currently not supported, '{0}' will get attaced as SCSI"),
'nvme-unsupported': gettext("NVMe disks are currently not supported, '{0}' will get attached as SCSI"),
'ovmf-with-lsi-unsupported': gettext("OVMF is built without LSI drivers, scsi hardware was set to '{1}'"),
'serial-port-socket-only': gettext("Serial socket '{0}' will be mapped to a socket"),
'guest-is-running': gettext('Virtual guest seems to be running on source host. Import might fail or have inconsistent state!'),

View File

@ -102,7 +102,7 @@ Ext.define('PVE.window.Migrate', {
},
onTargetChange: function(nodeSelector) {
// Always display the storages of the currently seleceted migration target
// Always display the storages of the currently selected migration target
this.lookup('pveDiskStorageSelector').setNodename(nodeSelector.value);
this.checkMigratePreconditions();
},

View File

@ -126,8 +126,10 @@ Ext.define('PVE.window.PCIMapEditWindow', {
this.lookup('pciselector').setMdev(value);
},
nodeChange: function(_field, value) {
this.lookup('pciselector').setNodename(value);
nodeChange: function(field, value) {
if (!field.isDisabled()) {
this.lookup('pciselector').setNodename(value);
}
},
pciChange: function(_field, value) {

View File

@ -132,7 +132,7 @@ Ext.define('PVE.window.Restore', {
if (key === '#qmdump#map') {
let match = value.match(/^(\S+):(\S+):(\S*):(\S*):$/) ?? [];
// if a /dev/XYZ disk was backed up, ther is no storage hint
// if a /dev/XYZ disk was backed up, there is no storage hint
allStoragesAvailable &&= !!match[3] && !!PVE.data.ResourceStore.getById(
`storage/${view.nodename}/${match[3]}`);
} else if (key === 'name' || key === 'hostname') {

View File

@ -5,7 +5,7 @@ Ext.define('PVE.window.TreeSettingsEdit', {
title: gettext('Tree Settings'),
isCreate: false,
url: '#', // ignored as submit() gets overriden here, but the parent class requires it
url: '#', // ignored as submit() gets overridden here, but the parent class requires it
width: 450,
fieldDefaults: {

View File

@ -101,9 +101,11 @@ Ext.define('PVE.window.USBMapEditWindow', {
usbsel.setDisabled(!value);
},
nodeChange: function(_field, value) {
this.lookup('id').setNodename(value);
this.lookup('path').setNodename(value);
nodeChange: function(field, value) {
if (!field.isDisabled()) {
this.lookup('id').setNodename(value);
this.lookup('path').setNodename(value);
}
},

View File

@ -586,6 +586,15 @@ utilities: {
}
},
overrideNotificationFieldName: function(extra) {
// do nothing, we don't have notification configuration in mobile ui
},
overrideNotificationFieldValue: function(extra) {
// do nothing, we don't have notification configuration in mobile ui
},
format_task_description: function(type, id) {
let farray = Proxmox.Utils.task_desc_table[type];
let text;

View File

@ -37,7 +37,7 @@ var js_api_version;
/**
* Message types for messsages to/from the extension
* Message types for messages to/from the extension
* @const
* @enum {string}
*/