mirror of git://git.proxmox.com/git/pve-ha-manager.git

fixing typos, also whitespace cleanup in PVE2 env class

Fix typos throughout the whole project, using codespell to find most of
them.
Also do a big whitespace cleanup in the PVE2 environment class.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Authored by Thomas Lamprecht, 2015-09-16 11:25:17 +02:00; committed by Dietmar Maurer
parent 4877c0312a
commit 63f6a08c82
6 changed files with 32 additions and 33 deletions

README
View File

@@ -41,7 +41,7 @@ The Proxmox 'pmxcfs' implements this on top of corosync.
=== Watchdog ===
We need a reliable watchdog mechanism, which is able to provide hard
timeouts. It must be guaranteed that the node reboot withing specified
timeouts. It must be guaranteed that the node reboots within the specified
timeout if we do not update the watchdog. For me it looks that neither
systemd nor the standard watchdog(8) daemon provides such guarantees.
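The PVE2 environment class further down in this diff opens a local socket to exactly such a watchdog multiplexer (watchdog-mux); below is a minimal stand-alone sketch of that client side. The socket path is taken from the diff, but the "write one byte as keep-alive" convention is an assumption here, not a documented protocol.

#!/usr/bin/perl
# Sketch of a watchdog-mux client. The socket path matches the PVE2 code
# shown later in this diff; the one-byte keep-alive write is an assumed
# convention, not the real protocol specification.
use strict;
use warnings;
use IO::Socket::UNIX;
use Socket qw(SOCK_STREAM);

my $sock = IO::Socket::UNIX->new(
    Type => SOCK_STREAM(),
    Peer => "/run/watchdog-mux.sock",
) or die "unable to open watchdog socket - $!\n";

for (1 .. 3) {
    # any write is assumed to mean "this client is still alive"; the real
    # LRM would repeat this periodically inside its work loop
    syswrite($sock, "\0", 1) or die "watchdog update failed - $!\n";
    sleep(10);
}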
@@ -52,7 +52,7 @@ provides that service to several other daemons using a local socket.
== Self fencing ==
A node needs to aquire a special 'ha_agent_${node}_lock' (one separate
A node needs to acquire a special 'ha_agent_${node}_lock' (one separate
lock for each node) before starting HA resources, and the node updates
the watchdog device once it get that lock. If the node loose quorum,
or is unable to get the 'ha_agent_${node}_lock', the watchdog is no
@@ -63,7 +63,7 @@ This makes sure that the node holds the 'ha_agent_${node}_lock' as
long as there are running services on that node.
The HA manger can assume that the watchdog triggered a reboot when he
is able to aquire the 'ha_agent_${node}_lock' for that node.
is able to acquire the 'ha_agent_${node}_lock' for that node.
=== Problems with "two_node" Clusters ===
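To make the self-fencing rules quoted above concrete (not the "two_node" section that follows), here is a small stand-alone sketch of the per-node decision: feed the watchdog only while the node is quorate and holds its 'ha_agent_${node}_lock', otherwise stop updating and let the hard timeout reboot the node. The environment callbacks are mock stand-ins, not the real PVE::HA::Env API.

#!/usr/bin/perl
# Sketch of the self-fencing rule described above; the environment callbacks
# are mock stand-ins, not the real PVE::HA::Env API.
use strict;
use warnings;

sub node_tick {
    my ($env, $node) = @_;

    # feed the watchdog only while quorate and holding 'ha_agent_${node}_lock'
    if ($env->{quorate}->() && $env->{get_ha_agent_lock}->($node)) {
        $env->{watchdog_update}->();
        return 1;
    }

    # otherwise stop updating - the hard watchdog timeout will reboot the node
    return 0;
}

# mock environment: pretend we are quorate and hold our agent lock
my $env = {
    quorate           => sub { return 1; },
    get_ha_agent_lock => sub { my ($node) = @_; return 1; },
    watchdog_update   => sub { print "watchdog updated\n"; },
};

node_tick($env, 'node1');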

View File

@@ -105,14 +105,14 @@ sub log {
return $self->{plug}->log($level, @args);
}
# aquire a cluster wide manager lock
# acquire a cluster wide manager lock
sub get_ha_manager_lock {
my ($self) = @_;
return $self->{plug}->get_ha_manager_lock();
}
# aquire a cluster wide node agent lock
# acquire a cluster wide node agent lock
sub get_ha_agent_lock {
my ($self, $node) = @_;
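The class shown above is only a thin wrapper that forwards each call to the environment "plug" it was constructed with (such as the PVE2 class in the next file). A minimal sketch of that delegation pattern follows; both package names are made up for illustration.

#!/usr/bin/perl
use strict;
use warnings;

# Made-up backend standing in for a real environment plug such as the
# PVE2 class shown in the next file.
package My::FakeBackend;
sub new { return bless {}, shift }
sub get_ha_manager_lock { return 1 }   # pretend the cluster-wide lock was acquired
sub get_ha_agent_lock   { my ($self, $node) = @_; return 1 }

# Thin wrapper that only forwards to its plug, like the Env class above.
package My::Env;
sub new {
    my ($class, $plug) = @_;
    return bless { plug => $plug }, $class;
}
sub get_ha_manager_lock { my ($self) = @_; return $self->{plug}->get_ha_manager_lock(); }
sub get_ha_agent_lock   { my ($self, $node) = @_; return $self->{plug}->get_ha_agent_lock($node); }

package main;
my $env = My::Env->new(My::FakeBackend->new());
print "got manager lock\n" if $env->get_ha_manager_lock();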

View File

@@ -47,7 +47,7 @@ sub read_manager_status {
sub write_manager_status {
my ($self, $status_obj) = @_;
PVE::HA::Config::write_manager_status($status_obj);
}
@@ -63,7 +63,7 @@ sub write_lrm_status {
my ($self, $status_obj) = @_;
my $node = $self->{nodename};
PVE::HA::Config::write_lrm_status($node, $status_obj);
}
@@ -81,7 +81,7 @@ sub read_crm_commands {
sub service_config_exists {
my ($self) = @_;
return PVE::HA::Config::resources_config_exists();
}
@@ -89,7 +89,7 @@ sub read_service_config {
my ($self) = @_;
my $res = PVE::HA::Config::read_resources_config();
my $vmlist = PVE::Cluster::get_vmlist();
my $conf = {};
@@ -116,7 +116,7 @@ sub read_service_config {
}
}
}
return $conf;
}
@@ -147,7 +147,7 @@ sub get_node_info {
my ($self) = @_;
my ($node_info, $quorate) = ({}, 0);
my $nodename = $self->{nodename};
$quorate = PVE::Cluster::check_cfs_quorum(1) || 0;
@@ -156,11 +156,11 @@ sub get_node_info {
foreach my $node (keys %$members) {
my $d = $members->{$node};
$node_info->{$node}->{online} = $d->{online};
$node_info->{$node}->{online} = $d->{online};
}
$node_info->{$nodename}->{online} = 1; # local node is always up
return ($node_info, $quorate);
}
@@ -187,7 +187,7 @@ sub get_pve_lock {
my $retry = 0;
my $retry_timeout = 100; # fixme: what timeout
eval {
mkdir $lockdir;
@@ -219,15 +219,15 @@ sub get_pve_lock {
# $self->log('err', $err) if $err; # for debugging
return 0;
}
$last_lock_status->{$lockid} = $got_lock ? $ctime : 0;
if (!!$got_lock != !!$last) {
if ($got_lock) {
$self->log('info', "successfully aquired lock '$lockid'");
$self->log('info', "successfully acquired lock '$lockid'");
} else {
my $msg = "lost lock '$lockid";
$msg .= " - $err" if $err;
$msg .= " - $err" if $err;
$self->log('err', $msg);
}
} else {
@@ -245,7 +245,7 @@ sub get_ha_manager_lock {
sub get_ha_agent_lock {
my ($self, $node) = @_;
$node = $self->nodename() if !defined($node);
return $self->get_pve_lock("ha_agent_${node}_lock");
@@ -255,10 +255,10 @@ sub quorate {
my ($self) = @_;
my $quorate = 0;
eval {
$quorate = PVE::Cluster::check_cfs_quorum();
eval {
$quorate = PVE::Cluster::check_cfs_quorum();
};
return $quorate;
}
@@ -290,7 +290,7 @@ sub loop_start_hook {
my ($self) = @_;
PVE::Cluster::cfs_update();
$self->{loop_start} = $self->get_time();
}
@@ -298,7 +298,7 @@ sub loop_end_hook {
my ($self) = @_;
my $delay = $self->get_time() - $self->{loop_start};
warn "loop take too long ($delay seconds)\n" if $delay > 30;
}
@@ -313,7 +313,7 @@ sub watchdog_open {
Type => SOCK_STREAM(),
Peer => "/run/watchdog-mux.sock") ||
die "unable to open watchdog socket - $!\n";
$self->log('info', "watchdog active");
}
@@ -367,15 +367,15 @@ sub exec_resource_agent {
my ($self, $sid, $service_config, $cmd, @params) = @_;
# setup execution environment
$ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
PVE::INotify::inotify_close();
PVE::INotify::inotify_init();
PVE::Cluster::cfs_update();
my $nodename = $self->{nodename};
# fixme: return valid_exit code (instead of using die) ?
@@ -473,8 +473,7 @@ sub exec_resource_agent {
} elsif ($cmd eq 'error') {
if($running) {
if ($running) {
$self->log("err", "service $sid is in an error state while running");
} else {
$self->log("warning", "service $sid is not running and in an error state");

View File

@@ -379,7 +379,7 @@ sub manage {
next if !$fenced_nodes->{$sd->{node}};
# node fence was sucessful - mark service as stopped
# node fence was successful - mark service as stopped
&$change_service_state($self, $sid, 'stopped');
}

View File

@@ -315,7 +315,7 @@ sub global_lock {
}
if (!$success) {
close($fh);
die "can't aquire lock '$lockfile' - $!\n";
die "can't acquire lock '$lockfile' - $!\n";
}
}

View File

@@ -53,7 +53,7 @@ sub pve_verify_ha_group_node {
}
PVE::JSONSchema::register_standard_option('pve-ha-group-node-list', {
description => "List of cluster node names with optional priority. We use priority '0' as default. The CRM tries to run services on the node with higest priority (also see option 'nofailback').",
description => "List of cluster node names with optional priority. We use priority '0' as default. The CRM tries to run services on the node with highest priority (also see option 'nofailback').",
type => 'string', format => 'pve-ha-group-node-list',
typetext => '<node>[:<pri>]{,<node>[:<pri>]}*',
});
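To illustrate the '<node>[:<pri>]{,<node>[:<pri>]}*' format registered here, a small stand-alone sketch that turns such a list into a node-to-priority map, with the priority defaulting to 0 as the description says; parse_node_list() is a made-up helper, not the parser the HA stack actually uses.

#!/usr/bin/perl
# Sketch of parsing the '<node>[:<pri>]{,<node>[:<pri>]}*' format described
# above; parse_node_list() is a made-up helper for illustration only.
use strict;
use warnings;

sub parse_node_list {
    my ($list) = @_;
    my $prio = {};
    foreach my $entry (split(/,/, $list)) {
        my ($node, $pri) = split(/:/, $entry, 2);
        $prio->{$node} = defined($pri) ? int($pri) : 0;  # priority defaults to 0
    }
    return $prio;
}

my $prio = parse_node_list("node1:2,node2,node3:1");
foreach my $node (sort keys %$prio) {
    print "$node => priority $prio->{$node}\n";
}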