
tree-wide: fix various typos

found with codespell

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Thomas Lamprecht 2023-11-29 18:32:06 +01:00
parent efbd1d488b
commit 6685122c3b
27 changed files with 40 additions and 40 deletions
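
For reference, a typical codespell invocation over a source tree looks something like this (the exact flags used for this commit are not recorded, so the values below are illustrative assumptions):

    # Illustrative only -- not the exact command used for this commit.
    # --skip excludes paths/globs, --ignore-words-list suppresses known
    # false positives ('crate' is a common one in Rust trees).
    codespell --skip='.git,*.min.js' --ignore-words-list='crate'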


@@ -37,7 +37,7 @@ How can I upgrade Proxmox Backup Server to the next point release?
Minor version upgrades, for example upgrading from Proxmox Backup Server in
version 3.1 to 3.2 or 3.3, can be done just like any normal update.
But, you should still check the `release notes
-<https://pbs.proxmox.com/wiki/index.php/Roadmap>`_ for any relevant noteable,
+<https://pbs.proxmox.com/wiki/index.php/Roadmap>`_ for any relevant notable,
or breaking change.
For the update itself use either the Web UI *Node -> Updates* panel or
@@ -112,6 +112,6 @@ consumed and the impact on the network. Each backup still references all
data and such is a full backup. For details see the
:ref:`Technical Overview <tech_design_overview>`
-.. todo:: document our stabillity guarantees, i.e., the separate one for, in
+.. todo:: document our stability guarantees, i.e., the separate one for, in
increasing duration of how long we'll support it: api compat, backup
protocol compat and backup format compat


@@ -749,7 +749,7 @@ Ext.onReady(function() {
fieldLabel: 'End Time',
allowBlank: false,
format: 'H:i',
-// cant bind value because ExtJS sets the year to 2008 to
+// can't bind value because ExtJS sets the year to 2008 to
// protect against DST issues and date picker zeroes hour/minute
value: vm.get('now'),
listeners: {


@@ -195,7 +195,7 @@ pub enum DatastoreFSyncLevel {
/// while reducing the impact on many file systems in contrast to the file level sync.
/// Depending on the setup, it might have a negative impact on unrelated write operations
/// of the underlying filesystem, but it is generally a good compromise between performance
-/// and consitency.
+/// and consistency.
#[default]
Filesystem,
}


@@ -175,7 +175,7 @@ pub enum MetricServerType {
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "kebab-case")]
-/// Basic information about a metric server thats available for all types
+/// Basic information about a metric server that's available for all types
pub struct MetricServerInfo {
pub name: String,
#[serde(rename = "type")]


@@ -8,7 +8,7 @@ use crate::{
};
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
StringSchema::new("Timeframe to specify when the rule is actice.")
StringSchema::new("Timeframe to specify when the rule is active.")
.format(&DAILY_DURATION_FORMAT)
.schema();


@@ -374,7 +374,7 @@ pub struct Shell {
/// Interactive prompt.
prompt: String,
-/// Calalog reader instance to navigate
+/// Catalog reader instance to navigate
catalog: CatalogReader,
/// List of selected paths for restore


@@ -921,7 +921,7 @@ where
tarencoder
.add_entry(&mut header, path, tokio::io::empty())
.await
.context("coult not send fifo entry")?;
.context("could not send fifo entry")?;
}
EntryKind::Directory => {
log::debug!("adding '{}' to tar", path.display());


@@ -56,7 +56,7 @@ fn decode_volume_statistics(data: &[u8]) -> Result<Lp17VolumeStatistics, Error>
let read_be_counter = |reader: &mut &[u8], len: u8| {
let len = len as usize;
if len == 0 || len > 8 {
bail!("invalid conter size '{}'", len);
bail!("invalid counter size '{}'", len);
}
let mut buffer = [0u8; 8];
reader.read_exact(&mut buffer[..len])?;


@@ -1034,7 +1034,7 @@ async fn create_backup(
}
if dry_run {
log::info!("dry-run: no upload happend");
log::info!("dry-run: no upload happened");
return Ok(Value::Null);
}


@@ -124,10 +124,10 @@ pub fn update_rrd(path: String, time: Option<u64>, value: f64) -> Result<(), Err
type: CF,
},
resolution: {
description: "Time resulution",
description: "Time resolution",
},
start: {
description: "Start time. If not sepecified, we simply extract 10 data points.",
description: "Start time. If not specified, we simply extract 10 data points.",
optional: true,
},
end: {
@@ -292,11 +292,11 @@ pub fn resize_rrd(path: String, rra_index: usize, slots: i64) -> Result<(), Erro
let new_slots = (rra.data.len() as i64) + slots;
if new_slots < 1 {
bail!("numer of new slots is too small ('{}' < 1)", new_slots);
bail!("number of new slots is too small ('{}' < 1)", new_slots);
}
if new_slots > 1024 * 1024 {
bail!("numer of new slots is too big ('{}' > 1M)", new_slots);
bail!("number of new slots is too big ('{}' > 1M)", new_slots);
}
let rra_end = rra.slot_end_time(rrd.source.last_update as u64);


@@ -102,7 +102,7 @@ impl RRDCache {
/// * cf=average,r=7*86400,n=570 => 10years
/// * cf=maximum,r=7*86400,n=570 => 10year
///
-/// The resultion data file size is about 80KB.
+/// The resulting data file size is about 80KB.
pub fn create_proxmox_backup_default_rrd(dst: DST) -> RRD {
let rra_list = vec![
// 1 min * 1440 => 1 day
@@ -207,7 +207,7 @@ impl RRDCache {
/// Extract data from cached RRD
///
-/// `start`: Start time. If not sepecified, we simply extract 10 data points.
+/// `start`: Start time. If not specified, we simply extract 10 data points.
///
/// `end`: End time. Default is to use the current time.
pub fn extract_cached_data(


@@ -147,7 +147,7 @@ impl DataSource {
// we update last_value anyways, so that we can compute the diff
// next time.
self.last_value = value;
bail!("conter overflow/reset detected");
bail!("counter overflow/reset detected");
} else {
value - self.last_value
};


@@ -13,7 +13,7 @@ pub const PROXMOX_RRD_MAGIC_1_0: [u8; 8] = [206, 46, 26, 212, 172, 158, 5, 186];
use crate::rrd::{DataSource, CF, DST, RRA, RRD};
bitflags! {
-/// Flags to specify the data soure type and consolidation function
+/// Flags to specify the data source type and consolidation function
pub struct RRAFlags: u64 {
// Data Source Types
const DST_GAUGE = 1;
@@ -34,9 +34,9 @@ bitflags! {
/// RRD files.
#[repr(C)]
pub struct RRAv1 {
-/// Defined the data soure type and consolidation function
+/// Defined the data source type and consolidation function
pub flags: RRAFlags,
-/// Resulution (seconds)
+/// Resolution (seconds)
pub resolution: u64,
/// Last update time (epoch)
pub last_update: f64,
@@ -213,7 +213,7 @@ impl RRDv1 {
let (start, reso, data) = self.hour_max.extract_data();
day_max.insert_data(start, reso, data)?;
-// compute montly average (merge old self.month_avg,
+// compute monthly average (merge old self.month_avg,
// self.week_avg and self.day_avg)
let mut month_avg = RRA::new(CF::Average, 30 * 60, 1440);
@@ -228,7 +228,7 @@ impl RRDv1 {
let (start, reso, data) = self.day_avg.extract_data();
month_avg.insert_data(start, reso, data)?;
-// compute montly maximum (merge old self.month_max,
+// compute monthly maximum (merge old self.month_max,
// self.week_max and self.day_max)
let mut month_max = RRA::new(CF::Maximum, 30 * 60, 1440);


@@ -73,7 +73,7 @@ fn pxar_create_and_extract() {
.unwrap_or_else(|err| panic!("Failed to invoke 'rm': {}", err));
// If source and destination folder contain the same content,
-// the output of the rsync invokation should yield no lines.
+// the output of the rsync invocation should yield no lines.
if linecount != 0 {
panic!("pxar create and extract did not yield the same contents");
}


@@ -17,7 +17,7 @@ use pbs_config::metrics;
async fn test_server(address: &str) -> Result<(), Error> {
test_influxdb_udp(address)
.await
.map_err(|err| format_err!("cannot conect to {}: {}", address, err))
.map_err(|err| format_err!("cannot connect to {}: {}", address, err))
}
#[api(


@@ -217,7 +217,7 @@ impl LdapAuthenticator {
}
}
-/// Lookup the autenticator for the specified realm
+/// Lookup the authenticator for the specified realm
pub(crate) fn lookup_authenticator(
realm: &RealmRef,
) -> Result<Box<dyn Authenticator + Send + Sync>, Error> {


@@ -125,7 +125,7 @@ pub fn dump_schema(schema: &Schema) -> Value {
data["format"] = dump_schema(subschema);
data["typetext"] = get_property_string_type_text(subschema).into();
}
-_ => { /* do nothing - shouldnot happen */ }
+_ => { /* do nothing - should not happen */ }
};
}
}


@@ -212,7 +212,7 @@ impl Checker {
self.output.log_warn(
"proxmox-boot-tool is used for bootloader configuration in uefi mode \
but the separate systemd-boot package, is not installed.\n\
-initializing new ESPs will not work unitl the package is installed.",
+initializing new ESPs will not work until the package is installed.",
)?;
return Ok(());
} else if !Path::new("/usr/share/doc/grub-efi-amd64/changelog.Debian.gz").is_file() {


@@ -352,7 +352,7 @@ fn visit_directory<'f, 'c>(
let first_chunk = index
.chunk_from_offset(range.start)
.context("Invalid offest")?
.context("Invalid offset")?
.0;
let last_chunk = index
.chunk_from_offset(range.end)
@@ -447,7 +447,7 @@ async fn compare_file(
} else {
let content_identical = compare_file_contents(file_a, file_b).await?;
if content_identical && !changed.any_without_mtime() {
-// If the content is identical and nothing, exluding mtime,
+// If the content is identical and nothing, excluding mtime,
// has changed, we don't consider the entry as modified.
changed.mtime = false;
}
@@ -792,7 +792,7 @@ impl FileEntryPrinter {
Ok(())
}
-/// Print a file entry, including `changed` indicators and column seperators
+/// Print a file entry, including `changed` indicators and column separators
pub fn print_file_entry(
&mut self,
entry: &FileEntry,


@@ -55,7 +55,7 @@ pub fn do_realm_sync_job(
Ok(upid_str)
}
-/// Implemenation for syncing LDAP realms
+/// Implementation for syncing LDAP realms
struct LdapRealmSyncJob {
worker: Arc<WorkerTask>,
realm: Realm,
@@ -361,7 +361,7 @@ impl LdapRealmSyncJob {
}
}
-/// General realm sync settings - Override for manual invokation
+/// General realm sync settings - Override for manual invocation
struct GeneralSyncSettingsOverride {
remove_vanished: Option<String>,
enable_new: Option<bool>,


@@ -46,12 +46,12 @@ impl OnlineStatusMap {
})
}
-/// Returns the assiciated changer name for a media.
+/// Returns the associated changer name for a media.
pub fn lookup_changer(&self, uuid: &Uuid) -> Option<&String> {
self.changer_map.get(uuid)
}
-/// Returns the map which assiciates media uuids with changer names.
+/// Returns the map which associates media uuids with changer names.
pub fn changer_map(&self) -> &HashMap<Uuid, String> {
&self.changer_map
}


@@ -635,7 +635,7 @@ pub enum DiskUsageType {
#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
-/// Baisc information about a partition
+/// Basic information about a partition
pub struct PartitionInfo {
/// The partition name
pub name: String,


@@ -25,7 +25,7 @@ pub enum SmartStatus {
pub struct SmartAttribute {
/// Attribute name
name: String,
-// FIXME: remove value with next major relase (PBS 3.0)
+// FIXME: remove value with next major release (PBS 3.0)
/// duplicate of raw - kept for API stability
value: String,
/// Attribute raw value


@@ -71,7 +71,7 @@ Ext.define('PBS.Dashboard', {
let me = this;
let view = me.getView();
let status = records[0].data.status || 'unknown';
-// 2 = all good, 1 = different leves, 0 = none
+// 2 = all good, 1 = different levels, 0 = none
let subscriptionActive = status.toLowerCase() === 'active';
let subStatus = status.toLowerCase() === 'active' ? 2 : 0;
me.lookup('subscription').setSubStatus(subStatus);


@@ -307,7 +307,7 @@ Ext.define('PBS.Utils', {
return Ext.String.format(gettext("in {0}"), duration);
},
-// FIXME: depreacted by Proxmox.Utils.render_size_usage ?!
+// FIXME: deprecated by Proxmox.Utils.render_size_usage ?!
render_size_usage: function(val, max) {
if (max === 0) {
return gettext('N/A');


@@ -4,7 +4,7 @@ Ext.define('pve-rrd-datastore', {
'used',
'total',
{
-name: 'unpriv-total', // Can't resuse 'total' here as that creates a stack overflow
+name: 'unpriv-total', // Can't reuse 'total' here as that creates a stack overflow
calculate: function(data) {
let used = data.used;
let avail = data.available;


@@ -141,7 +141,7 @@ Ext.define('PBS.form.GroupFilter', {
view.dsStore.each(record => recs.push(record.data));
group.getStore().setData(recs);
-// add a widget reference to the record so we can acces them from the other column
+// add a widget reference to the record so we can access them from the other column
rec.widgets = {
type,
regex,