pve-storage/PVE/Storage/GlusterfsPlugin.pm
Fabian Ebner 8009417d0d plugins: allow limiting the number of protected backups per guest
The ability to mark backups as protected broke the implicit assumption
in vzdump that, with remove=1 and the current number of backups at the
limit (i.e. the sum of all keep options), a backup will be removed.

Introduce a new storage property 'max-protected-backups' to limit the
number of protected backups per guest. Use 5 as the default value, as it
should cover most use cases while keeping the potential overhead
reasonable in most scenarios.
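
For illustration, such a property could be declared in the shared
storage schema roughly as follows. Only the option name and the default
of 5 come from this change; the surrounding declaration and description
text are a sketch, not the actual patch:

    'max-protected-backups' => {
        description => "Maximal number of protected backups per guest.",
        type => 'integer',
        optional => 1,
        default => 5,
    },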

For external plugins that do not return the backup subtype in
list_volumes, all protected backups with the same ID will count
towards the limit.
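
As a rough sketch of what this counting rule means (variable names and
data structure here are illustrative, not taken from the actual
implementation):

    my $protected_count = {};
    for my $backup (@$backups) { # entries as returned by list_volumes()
        next if !$backup->{protected};
        # group by backup subtype when the plugin reports one; otherwise
        # all protected backups of the same guest share a single bucket
        my $group = $backup->{subtype} // 'all';
        $protected_count->{$backup->{vmid}}->{$group}++;
    }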

An alternative would be to count the protected backups when pruning.
While that would avoid the need for a new property, it would break the
current semantics of protected backups being ignored for pruning. It
would also be less flexible: for PBS, for example, it can make sense to
have both keep-all=1 and a limit on the number of protected snapshots
on the PVE side.

Signed-off-by: Fabian Ebner <f.ebner@proxmox.com>
2022-04-06 09:47:12 +02:00


package PVE::Storage::GlusterfsPlugin;

use strict;
use warnings;

use IO::File;
use File::Path;

use PVE::Tools qw(run_command);
use PVE::ProcFSTools;
use PVE::Network;
use PVE::Storage::Plugin;
use PVE::JSONSchema qw(get_standard_option);

use base qw(PVE::Storage::Plugin);

# Glusterfs helper functions

my $server_test_results = {};

my $get_active_server = sub {
    my ($scfg, $return_default_if_offline) = @_;

    my $defaultserver = $scfg->{server} ? $scfg->{server} : 'localhost';

    if ($return_default_if_offline && !defined($scfg->{server2})) {
        # avoid delays (there is no backup server anyways)
        return $defaultserver;
    }

    my $serverlist = [ $defaultserver ];
    push @$serverlist, $scfg->{server2} if $scfg->{server2};

    my $ctime = time();
    foreach my $server (@$serverlist) {
        my $stat = $server_test_results->{$server};
        return $server if $stat && $stat->{active} && (($ctime - $stat->{time}) <= 2);
    }

    foreach my $server (@$serverlist) {
        my $status = 0;

        if ($server && $server ne 'localhost' && $server ne '127.0.0.1' && $server ne '::1') {
            # ping the gluster daemon default port (24007) as heuristic
            $status = PVE::Network::tcp_ping($server, 24007, 2);
        } else {
            my $parser = sub {
                my $line = shift;
                if ($line =~ m/Status: Started$/) {
                    $status = 1;
                }
            };

            my $cmd = ['/usr/sbin/gluster', 'volume', 'info', $scfg->{volume}];
            run_command($cmd, errmsg => "glusterfs error", errfunc => sub {}, outfunc => $parser);
        }

        $server_test_results->{$server} = { time => time(), active => $status };
        return $server if $status;
    }

    return $defaultserver if $return_default_if_offline;

    return undef;
};

sub glusterfs_is_mounted {
    my ($volume, $mountpoint, $mountdata) = @_;

    $mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;

    return $mountpoint if grep {
        $_->[2] eq 'fuse.glusterfs' &&
        $_->[0] =~ /^\S+:\Q$volume\E$/ &&
        $_->[1] eq $mountpoint
    } @$mountdata;

    return undef;
}

sub glusterfs_mount {
    my ($server, $volume, $mountpoint) = @_;

    my $source = "$server:$volume";

    my $cmd = ['/bin/mount', '-t', 'glusterfs', $source, $mountpoint];
    run_command($cmd, errmsg => "mount error");
}

# Configuration

sub type {
    return 'glusterfs';
}

sub plugindata {
    return {
        content => [
            { images => 1, vztmpl => 1, iso => 1, backup => 1, snippets => 1 },
            { images => 1 },
        ],
        format => [ { raw => 1, qcow2 => 1, vmdk => 1 }, 'raw' ],
    };
}

sub properties {
    return {
        volume => {
            description => "Glusterfs Volume.",
            type => 'string',
        },
        server2 => {
            description => "Backup volfile server IP or DNS name.",
            type => 'string', format => 'pve-storage-server',
            requires => 'server',
        },
        transport => {
            description => "Gluster transport: tcp or rdma",
            type => 'string',
            enum => ['tcp', 'rdma', 'unix'],
        },
    };
}

sub options {
    return {
        path => { fixed => 1 },
        server => { optional => 1 },
        server2 => { optional => 1 },
        volume => { fixed => 1 },
        transport => { optional => 1 },
        nodes => { optional => 1 },
        disable => { optional => 1 },
        maxfiles => { optional => 1 },
        'prune-backups' => { optional => 1 },
        'max-protected-backups' => { optional => 1 },
        content => { optional => 1 },
        format => { optional => 1 },
        mkdir => { optional => 1 },
        bwlimit => { optional => 1 },
        preallocation => { optional => 1 },
    };
}
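
# Illustrative /etc/pve/storage.cfg entry using the options above, including
# the new 'max-protected-backups' limit. The values are examples only and do
# not come from this commit:
#
#   glusterfs: glusterstore
#       server 10.0.0.1
#       server2 10.0.0.2
#       volume gv0
#       content images,backup
#       prune-backups keep-last=3
#       max-protected-backups 5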

sub check_config {
    my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;

    $config->{path} = "/mnt/pve/$sectionId" if $create && !$config->{path};

    return $class->SUPER::check_config($sectionId, $config, $create, $skipSchemaCheck);
}

# Storage implementation

sub parse_name_dir {
    my $name = shift;

    if ($name =~ m!^((base-)?[^/\s]+\.(raw|qcow2|vmdk))$!) {
        return ($1, $3, $2);
    }

    die "unable to parse volume filename '$name'\n";
}

sub path {
    my ($class, $scfg, $volname, $storeid, $snapname) = @_;

    my ($vtype, $name, $vmid, undef, undef, $isBase, $format) =
        $class->parse_volname($volname);

    # Note: qcow2/qed has internal snapshot, so path is always
    # the same (with or without snapshot => same file).
    die "can't snapshot this image format\n"
        if defined($snapname) && $format !~ m/^(qcow2|qed)$/;

    my $path = undef;
    if ($vtype eq 'images') {
        my $server = &$get_active_server($scfg, 1);
        my $glustervolume = $scfg->{volume};
        my $transport = $scfg->{transport};
        my $protocol = "gluster";

        if ($transport) {
            $protocol = "gluster+$transport";
        }

        $path = "$protocol://$server/$glustervolume/images/$vmid/$name";
    } else {
        my $dir = $class->get_subdir($scfg, $vtype);
        $path = "$dir/$name";
    }

    return wantarray ? ($path, $vmid, $vtype) : $path;
}

sub clone_image {
    my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;

    die "storage definition has no path\n" if !$scfg->{path};

    my ($vtype, $basename, $basevmid, undef, undef, $isBase, $format) =
        $class->parse_volname($volname);

    die "clone_image on wrong vtype '$vtype'\n" if $vtype ne 'images';
    die "this storage type does not support clone_image on snapshot\n" if $snap;
    die "this storage type does not support clone_image on subvolumes\n" if $format eq 'subvol';
    die "clone_image only works on base images\n" if !$isBase;

    my $imagedir = $class->get_subdir($scfg, 'images');
    $imagedir .= "/$vmid";
    mkpath $imagedir;

    my $name = $class->find_free_diskname($storeid, $scfg, $vmid, "qcow2", 1);

    warn "clone $volname: $vtype, $name, $vmid to $name (base=../$basevmid/$basename)\n";

    my $path = "$imagedir/$name";
    die "disk image '$path' already exists\n" if -e $path;

    my $server = &$get_active_server($scfg, 1);
    my $glustervolume = $scfg->{volume};
    my $volumepath = "gluster://$server/$glustervolume/images/$vmid/$name";

    my $cmd = ['/usr/bin/qemu-img', 'create', '-b', "../$basevmid/$basename",
        '-F', $format, '-f', 'qcow2', $volumepath];
    run_command($cmd, errmsg => "unable to create image");

    return "$basevmid/$basename/$vmid/$name";
}

sub alloc_image {
    my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;

    my $imagedir = $class->get_subdir($scfg, 'images');
    $imagedir .= "/$vmid";
    mkpath $imagedir;

    $name = $class->find_free_diskname($storeid, $scfg, $vmid, $fmt, 1) if !$name;

    my (undef, $tmpfmt) = parse_name_dir($name);
    die "illegal name '$name' - wrong extension for format ('$tmpfmt' != '$fmt')\n"
        if $tmpfmt ne $fmt;

    my $path = "$imagedir/$name";
    die "disk image '$path' already exists\n" if -e $path;

    my $server = &$get_active_server($scfg, 1);
    my $glustervolume = $scfg->{volume};
    my $volumepath = "gluster://$server/$glustervolume/images/$vmid/$name";

    my $cmd = ['/usr/bin/qemu-img', 'create'];

    my $prealloc_opt = PVE::Storage::Plugin::preallocation_cmd_option($scfg, $fmt);
    push @$cmd, '-o', $prealloc_opt if defined($prealloc_opt);

    push @$cmd, '-f', $fmt, $volumepath, "${size}K";

    eval { run_command($cmd, errmsg => "unable to create image"); };
    if ($@) {
        unlink $path;
        rmdir $imagedir;
        die "$@";
    }

    return "$vmid/$name";
}

sub status {
    my ($class, $storeid, $scfg, $cache) = @_;

    $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
        if !$cache->{mountdata};

    my $path = $scfg->{path};
    my $volume = $scfg->{volume};

    return undef if !glusterfs_is_mounted($volume, $path, $cache->{mountdata});

    return $class->SUPER::status($storeid, $scfg, $cache);
}

sub activate_storage {
    my ($class, $storeid, $scfg, $cache) = @_;

    $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
        if !$cache->{mountdata};

    my $path = $scfg->{path};
    my $volume = $scfg->{volume};

    if (!glusterfs_is_mounted($volume, $path, $cache->{mountdata})) {
        mkpath $path if !(defined($scfg->{mkdir}) && !$scfg->{mkdir});

        die "unable to activate storage '$storeid' - " .
            "directory '$path' does not exist\n" if ! -d $path;

        my $server = &$get_active_server($scfg, 1);
        glusterfs_mount($server, $volume, $path);
    }

    $class->SUPER::activate_storage($storeid, $scfg, $cache);
}

sub deactivate_storage {
    my ($class, $storeid, $scfg, $cache) = @_;

    $cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
        if !$cache->{mountdata};

    my $path = $scfg->{path};
    my $volume = $scfg->{volume};

    if (glusterfs_is_mounted($volume, $path, $cache->{mountdata})) {
        my $cmd = ['/bin/umount', $path];
        run_command($cmd, errmsg => 'umount error');
    }
}

sub activate_volume {
    my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;

    # do nothing by default
}

sub deactivate_volume {
    my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;

    # do nothing by default
}

sub check_connection {
    my ($class, $storeid, $scfg, $cache) = @_;

    my $server = &$get_active_server($scfg);

    return defined($server) ? 1 : 0;
}

1;