mirror of git://git.proxmox.com/git/proxmox-backup.git (synced 2025-01-06 13:18:00 +03:00)
tree-wide: fix various typos
found with codespell

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
parent efbd1d488b
commit 6685122c3b
@@ -37,7 +37,7 @@ How can I upgrade Proxmox Backup Server to the next point release?
 Minor version upgrades, for example upgrading from Proxmox Backup Server in
 version 3.1 to 3.2 or 3.3, can be done just like any normal update.
 But, you should still check the `release notes
-<https://pbs.proxmox.com/wiki/index.php/Roadmap>`_ for any relevant noteable,
+<https://pbs.proxmox.com/wiki/index.php/Roadmap>`_ for any relevant notable,
 or breaking change.

 For the update itself use either the Web UI *Node -> Updates* panel or
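(For context: the terminal route referenced in this FAQ entry is the standard Debian apt workflow, i.e. apt update followed by apt full-upgrade; that detail is general Debian practice, not spelled out in this hunk.)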
@@ -112,6 +112,6 @@ consumed and the impact on the network. Each backup still references all
 data and such is a full backup. For details see the
 :ref:`Technical Overview <tech_design_overview>`

-.. todo:: document our stabillity guarantees, i.e., the separate one for, in
+.. todo:: document our stability guarantees, i.e., the separate one for, in
    increasing duration of how long we'll support it: api compat, backup
    protocol compat and backup format compat
@@ -749,7 +749,7 @@ Ext.onReady(function() {
     fieldLabel: 'End Time',
     allowBlank: false,
     format: 'H:i',
-    // cant bind value because ExtJS sets the year to 2008 to
+    // can't bind value because ExtJS sets the year to 2008 to
     // protect against DST issues and date picker zeroes hour/minute
     value: vm.get('now'),
     listeners: {
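(Background on the comment being fixed: ExtJS time fields pin picked values to a fixed reference date in the year 2008, apparently its way of dodging DST ambiguities, so two-way binding a real Date would get silently rewritten; setting vm.get('now') once and reacting via listeners sidesteps that.)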
@@ -195,7 +195,7 @@ pub enum DatastoreFSyncLevel {
     /// while reducing the impact on many file systems in contrast to the file level sync.
     /// Depending on the setup, it might have a negative impact on unrelated write operations
     /// of the underlying filesystem, but it is generally a good compromise between performance
-    /// and consitency.
+    /// and consistency.
     #[default]
     Filesystem,
 }
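The Filesystem level described in this doc comment corresponds to Linux's syncfs(2), which flushes a whole filesystem once instead of fsync-ing every file. A minimal sketch of that mechanism, assuming the libc crate (hypothetical helper, not the actual datastore code):

    use std::fs::File;
    use std::os::unix::io::AsRawFd;
    use std::path::Path;

    // Hypothetical sketch: filesystem-level sync via syncfs(2).
    // Error handling is simplified; Linux-only.
    fn sync_datastore_filesystem(base: &Path) -> std::io::Result<()> {
        // any open fd on the target filesystem selects it for syncfs
        let dir = File::open(base)?;
        // one syscall flushes all dirty data of that filesystem, instead
        // of one fsync per chunk file
        if unsafe { libc::syncfs(dir.as_raw_fd()) } != 0 {
            return Err(std::io::Error::last_os_error());
        }
        Ok(())
    }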
@@ -175,7 +175,7 @@ pub enum MetricServerType {
 )]
 #[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
 #[serde(rename_all = "kebab-case")]
-/// Basic information about a metric server thats available for all types
+/// Basic information about a metric server that's available for all types
 pub struct MetricServerInfo {
     pub name: String,
     #[serde(rename = "type")]
@@ -8,7 +8,7 @@ use crate::{
 };

 pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
-    StringSchema::new("Timeframe to specify when the rule is actice.")
+    StringSchema::new("Timeframe to specify when the rule is active.")
         .format(&DAILY_DURATION_FORMAT)
         .schema();

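(As an aside, DAILY_DURATION_FORMAT accepts day-range-plus-time-window strings; the PBS traffic-control documentation shows values like mon..fri 8:00-12:00. That example comes from the product docs, not from this hunk.)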
@@ -374,7 +374,7 @@ pub struct Shell {
     /// Interactive prompt.
     prompt: String,

-    /// Calalog reader instance to navigate
+    /// Catalog reader instance to navigate
     catalog: CatalogReader,

     /// List of selected paths for restore
@@ -921,7 +921,7 @@ where
             tarencoder
                 .add_entry(&mut header, path, tokio::io::empty())
                 .await
-                .context("coult not send fifo entry")?;
+                .context("could not send fifo entry")?;
         }
         EntryKind::Directory => {
             log::debug!("adding '{}' to tar", path.display());
@@ -56,7 +56,7 @@ fn decode_volume_statistics(data: &[u8]) -> Result<Lp17VolumeStatistics, Error>
     let read_be_counter = |reader: &mut &[u8], len: u8| {
         let len = len as usize;
         if len == 0 || len > 8 {
-            bail!("invalid conter size '{}'", len);
+            bail!("invalid counter size '{}'", len);
         }
         let mut buffer = [0u8; 8];
         reader.read_exact(&mut buffer[..len])?;
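The closure above decodes a variable-width big-endian counter from a tape drive's volume-statistics log page. A self-contained sketch of that decoding (a hedged reconstruction, not the upstream closure: it right-aligns the bytes so u64::from_be_bytes needs no follow-up shift, while the excerpt fills buffer[..len] and would shift afterwards):

    use anyhow::{bail, Error};
    use std::io::Read;

    // decode a big-endian counter of 1..=8 bytes into a u64
    fn read_be_counter(reader: &mut &[u8], len: u8) -> Result<u64, Error> {
        let len = len as usize;
        if len == 0 || len > 8 {
            bail!("invalid counter size '{}'", len);
        }
        let mut buffer = [0u8; 8];
        // right-align so the missing high bytes stay zero
        reader.read_exact(&mut buffer[8 - len..])?;
        Ok(u64::from_be_bytes(buffer))
    }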
@@ -1034,7 +1034,7 @@ async fn create_backup(
     }

     if dry_run {
-        log::info!("dry-run: no upload happend");
+        log::info!("dry-run: no upload happened");
         return Ok(Value::Null);
     }

@@ -124,10 +124,10 @@ pub fn update_rrd(path: String, time: Option<u64>, value: f64) -> Result<(), Err
             type: CF,
         },
         resolution: {
-            description: "Time resulution",
+            description: "Time resolution",
         },
         start: {
-            description: "Start time. If not sepecified, we simply extract 10 data points.",
+            description: "Start time. If not specified, we simply extract 10 data points.",
             optional: true,
         },
         end: {
@@ -292,11 +292,11 @@ pub fn resize_rrd(path: String, rra_index: usize, slots: i64) -> Result<(), Erro
     let new_slots = (rra.data.len() as i64) + slots;

     if new_slots < 1 {
-        bail!("numer of new slots is too small ('{}' < 1)", new_slots);
+        bail!("number of new slots is too small ('{}' < 1)", new_slots);
     }

     if new_slots > 1024 * 1024 {
-        bail!("numer of new slots is too big ('{}' > 1M)", new_slots);
+        bail!("number of new slots is too big ('{}' > 1M)", new_slots);
     }

     let rra_end = rra.slot_end_time(rrd.source.last_update as u64);
@@ -102,7 +102,7 @@ impl RRDCache {
     /// * cf=average,r=7*86400,n=570 => 10years
     /// * cf=maximum,r=7*86400,n=570 => 10year
     ///
-    /// The resultion data file size is about 80KB.
+    /// The resulting data file size is about 80KB.
     pub fn create_proxmox_backup_default_rrd(dst: DST) -> RRD {
        let rra_list = vec![
            // 1 min * 1440 => 1 day
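(Checking the annotated figures: at one slot per week, 7 * 86400 s = 604800 s, 570 slots cover roughly 344.7 million seconds, i.e. just under 11 years, which matches the "10years" note.)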
@@ -207,7 +207,7 @@ impl RRDCache {

     /// Extract data from cached RRD
     ///
-    /// `start`: Start time. If not sepecified, we simply extract 10 data points.
+    /// `start`: Start time. If not specified, we simply extract 10 data points.
     ///
     /// `end`: End time. Default is to use the current time.
     pub fn extract_cached_data(
@@ -147,7 +147,7 @@ impl DataSource {
             // we update last_value anyways, so that we can compute the diff
             // next time.
             self.last_value = value;
-            bail!("conter overflow/reset detected");
+            bail!("counter overflow/reset detected");
         } else {
             value - self.last_value
         };
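For orientation, this hunk sits in the counter update path: the data source keeps the last raw reading and feeds the per-interval difference into the archive, treating a decreasing reading as an overflow or reset. A hedged sketch of that pattern as a standalone type (not the actual DataSource):

    use anyhow::bail;

    struct Counter { last_value: f64 }

    impl Counter {
        fn update(&mut self, value: f64) -> Result<f64, anyhow::Error> {
            if value < self.last_value {
                // remember the new reading so the next update can diff again
                self.last_value = value;
                bail!("counter overflow/reset detected");
            }
            let diff = value - self.last_value;
            self.last_value = value;
            Ok(diff)
        }
    }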
@@ -13,7 +13,7 @@ pub const PROXMOX_RRD_MAGIC_1_0: [u8; 8] = [206, 46, 26, 212, 172, 158, 5, 186];
 use crate::rrd::{DataSource, CF, DST, RRA, RRD};

 bitflags! {
-    /// Flags to specify the data soure type and consolidation function
+    /// Flags to specify the data source type and consolidation function
     pub struct RRAFlags: u64 {
         // Data Source Types
         const DST_GAUGE = 1;
@@ -34,9 +34,9 @@ bitflags! {
 /// RRD files.
 #[repr(C)]
 pub struct RRAv1 {
-    /// Defined the data soure type and consolidation function
+    /// Defined the data source type and consolidation function
     pub flags: RRAFlags,
-    /// Resulution (seconds)
+    /// Resolution (seconds)
     pub resolution: u64,
     /// Last update time (epoch)
     pub last_update: f64,
@@ -213,7 +213,7 @@ impl RRDv1 {
         let (start, reso, data) = self.hour_max.extract_data();
         day_max.insert_data(start, reso, data)?;

-        // compute montly average (merge old self.month_avg,
+        // compute monthly average (merge old self.month_avg,
         // self.week_avg and self.day_avg)
         let mut month_avg = RRA::new(CF::Average, 30 * 60, 1440);

@@ -228,7 +228,7 @@ impl RRDv1 {
         let (start, reso, data) = self.day_avg.extract_data();
         month_avg.insert_data(start, reso, data)?;

-        // compute montly maximum (merge old self.month_max,
+        // compute monthly maximum (merge old self.month_max,
         // self.week_max and self.day_max)
         let mut month_max = RRA::new(CF::Maximum, 30 * 60, 1440);

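(Both merge targets line up with their names: 30 * 60 s resolution times 1440 slots is 720 hours, i.e. 30 days, so month_avg and month_max each cover one month after absorbing the old day, week, and month archives.)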
@@ -73,7 +73,7 @@ fn pxar_create_and_extract() {
         .unwrap_or_else(|err| panic!("Failed to invoke 'rm': {}", err));

     // If source and destination folder contain the same content,
-    // the output of the rsync invokation should yield no lines.
+    // the output of the rsync invocation should yield no lines.
     if linecount != 0 {
         panic!("pxar create and extract did not yield the same contents");
     }
@@ -17,7 +17,7 @@ use pbs_config::metrics;
 async fn test_server(address: &str) -> Result<(), Error> {
     test_influxdb_udp(address)
         .await
-        .map_err(|err| format_err!("cannot conect to {}: {}", address, err))
+        .map_err(|err| format_err!("cannot connect to {}: {}", address, err))
 }

 #[api(
@@ -217,7 +217,7 @@ impl LdapAuthenticator {
     }
 }

-/// Lookup the autenticator for the specified realm
+/// Lookup the authenticator for the specified realm
 pub(crate) fn lookup_authenticator(
     realm: &RealmRef,
 ) -> Result<Box<dyn Authenticator + Send + Sync>, Error> {
@@ -125,7 +125,7 @@ pub fn dump_schema(schema: &Schema) -> Value {
                 data["format"] = dump_schema(subschema);
                 data["typetext"] = get_property_string_type_text(subschema).into();
             }
-            _ => { /* do nothing - shouldnot happen */ }
+            _ => { /* do nothing - should not happen */ }
         };
     }
 }
@@ -212,7 +212,7 @@ impl Checker {
             self.output.log_warn(
                 "proxmox-boot-tool is used for bootloader configuration in uefi mode \
                 but the separate systemd-boot package, is not installed.\n\
-                initializing new ESPs will not work unitl the package is installed.",
+                initializing new ESPs will not work until the package is installed.",
             )?;
             return Ok(());
         } else if !Path::new("/usr/share/doc/grub-efi-amd64/changelog.Debian.gz").is_file() {
@@ -352,7 +352,7 @@ fn visit_directory<'f, 'c>(

     let first_chunk = index
         .chunk_from_offset(range.start)
-        .context("Invalid offest")?
+        .context("Invalid offset")?
         .0;
     let last_chunk = index
         .chunk_from_offset(range.end)
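Here chunk_from_offset maps a byte offset inside the indexed archive to the chunk covering it, turning a file's byte range into an inclusive chunk range. A minimal sketch of such a lookup over cumulative chunk end offsets (an illustrative helper with an assumed representation, not the real index API):

    use std::ops::Range;

    // `chunk_ends[i]` is the cumulative end offset of chunk i, so chunk i
    // covers the bytes [chunk_ends[i-1], chunk_ends[i]).
    fn chunks_for_range(chunk_ends: &[u64], range: Range<u64>) -> Option<(usize, usize)> {
        // first chunk whose end lies past the start offset
        let first = chunk_ends.partition_point(|&end| end <= range.start);
        // first chunk whose end reaches the (exclusive) range end
        let last = chunk_ends.partition_point(|&end| end < range.end);
        if last >= chunk_ends.len() {
            return None; // range extends past the indexed data
        }
        Some((first, last))
    }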
@@ -447,7 +447,7 @@ async fn compare_file(
     } else {
         let content_identical = compare_file_contents(file_a, file_b).await?;
         if content_identical && !changed.any_without_mtime() {
-            // If the content is identical and nothing, exluding mtime,
+            // If the content is identical and nothing, excluding mtime,
             // has changed, we don't consider the entry as modified.
             changed.mtime = false;
         }
@@ -792,7 +792,7 @@ impl FileEntryPrinter {
         Ok(())
     }

-    /// Print a file entry, including `changed` indicators and column seperators
+    /// Print a file entry, including `changed` indicators and column separators
     pub fn print_file_entry(
         &mut self,
         entry: &FileEntry,
@@ -55,7 +55,7 @@ pub fn do_realm_sync_job(
     Ok(upid_str)
 }

-/// Implemenation for syncing LDAP realms
+/// Implementation for syncing LDAP realms
 struct LdapRealmSyncJob {
     worker: Arc<WorkerTask>,
     realm: Realm,
@@ -361,7 +361,7 @@ impl LdapRealmSyncJob {
     }
 }

-/// General realm sync settings - Override for manual invokation
+/// General realm sync settings - Override for manual invocation
 struct GeneralSyncSettingsOverride {
     remove_vanished: Option<String>,
     enable_new: Option<bool>,
@@ -46,12 +46,12 @@ impl OnlineStatusMap {
         })
     }

-    /// Returns the assiciated changer name for a media.
+    /// Returns the associated changer name for a media.
     pub fn lookup_changer(&self, uuid: &Uuid) -> Option<&String> {
         self.changer_map.get(uuid)
     }

-    /// Returns the map which assiciates media uuids with changer names.
+    /// Returns the map which associates media uuids with changer names.
     pub fn changer_map(&self) -> &HashMap<Uuid, String> {
         &self.changer_map
     }
@@ -635,7 +635,7 @@ pub enum DiskUsageType {
 #[api()]
 #[derive(Debug, Serialize, Deserialize)]
 #[serde(rename_all = "kebab-case")]
-/// Baisc information about a partition
+/// Basic information about a partition
 pub struct PartitionInfo {
     /// The partition name
     pub name: String,
@@ -25,7 +25,7 @@ pub enum SmartStatus {
 pub struct SmartAttribute {
     /// Attribute name
     name: String,
-    // FIXME: remove value with next major relase (PBS 3.0)
+    // FIXME: remove value with next major release (PBS 3.0)
     /// duplicate of raw - kept for API stability
     value: String,
     /// Attribute raw value
@@ -71,7 +71,7 @@ Ext.define('PBS.Dashboard', {
     let me = this;
     let view = me.getView();
     let status = records[0].data.status || 'unknown';
-    // 2 = all good, 1 = different leves, 0 = none
+    // 2 = all good, 1 = different levels, 0 = none
     let subscriptionActive = status.toLowerCase() === 'active';
     let subStatus = status.toLowerCase() === 'active' ? 2 : 0;
     me.lookup('subscription').setSubStatus(subStatus);
@@ -307,7 +307,7 @@ Ext.define('PBS.Utils', {
         return Ext.String.format(gettext("in {0}"), duration);
     },

-    // FIXME: depreacted by Proxmox.Utils.render_size_usage ?!
+    // FIXME: deprecated by Proxmox.Utils.render_size_usage ?!
     render_size_usage: function(val, max) {
         if (max === 0) {
             return gettext('N/A');
@@ -4,7 +4,7 @@ Ext.define('pve-rrd-datastore', {
     'used',
     'total',
     {
-        name: 'unpriv-total', // Can't resuse 'total' here as that creates a stack overflow
+        name: 'unpriv-total', // Can't reuse 'total' here as that creates a stack overflow
         calculate: function(data) {
             let used = data.used;
             let avail = data.available;
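(The stack-overflow remark likely refers to ExtJS calculated fields: naming the calculated field the same as one of its source fields makes the calculation depend on itself and recurse. This is an interpretation of the comment, not verified against ExtJS internals.)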
@@ -141,7 +141,7 @@ Ext.define('PBS.form.GroupFilter', {
         view.dsStore.each(record => recs.push(record.data));
         group.getStore().setData(recs);

-        // add a widget reference to the record so we can acces them from the other column
+        // add a widget reference to the record so we can access them from the other column
         rec.widgets = {
             type,
             regex,