Compare commits
472 Commits
**Cargo.toml** (78 lines changed)

```diff
@@ -1,5 +1,5 @@
 [workspace.package]
-version = "3.2.7"
+version = "3.3.4"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -13,6 +13,7 @@ authors = [
 edition = "2021"
 license = "AGPL-3"
 repository = "https://git.proxmox.com/?p=proxmox-backup.git"
+rust-version = "1.80"

 [package]
 name = "proxmox-backup"
@@ -28,7 +29,6 @@ exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]

 [workspace]
 members = [
-    "pbs-api-types",
     "pbs-buildcfg",
     "pbs-client",
     "pbs-config",
@@ -53,43 +53,51 @@ path = "src/lib.rs"

 [workspace.dependencies]
 # proxmox workspace
-proxmox-apt = "0.10.5"
+proxmox-apt = { version = "0.11", features = [ "cache" ] }
+proxmox-apt-api-types = "1.0.1"
 proxmox-async = "0.4"
 proxmox-auth-api = "0.4"
 proxmox-borrow = "1"
 proxmox-compression = "0.2"
+proxmox-config-digest = "0.1.0"
+proxmox-daemon = "0.1.0"
 proxmox-fuse = "0.1.3"
 proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
 proxmox-human-byte = "0.1"
 proxmox-io = "1.0.1" # tools and client use "tokio" feature
 proxmox-lang = "1.1"
+proxmox-log = "0.2.6"
 proxmox-ldap = "0.2.1"
 proxmox-metrics = "0.3.1"
-proxmox-notify = "0.4"
+proxmox-notify = "0.5.1"
 proxmox-openid = "0.10.0"
-proxmox-rest-server = { version = "0.5.1", features = [ "templates" ] }
+proxmox-rest-server = { version = "0.8.5", features = [ "templates" ] }
 # some use "cli", some use "cli" and "server", pbs-config uses nothing
-proxmox-router = { version = "2.0.0", default-features = false }
-proxmox-rrd = { version = "0.2" }
+proxmox-router = { version = "3.0.0", default-features = false }
+proxmox-rrd = "0.4"
+proxmox-rrd-api-types = "1.0.2"
 # everything but pbs-config and pbs-client use "api-macro"
-proxmox-schema = "3"
+proxmox-schema = "4"
 proxmox-section-config = "2"
 proxmox-serde = "0.1.1"
+proxmox-shared-cache = "0.1"
 proxmox-shared-memory = "0.3.0"
 proxmox-sortable-macro = "0.1.2"
-proxmox-subscription = { version = "0.4.2", features = [ "api-types" ] }
-proxmox-sys = "0.5.7"
-proxmox-tfa = { version = "4.0.4", features = [ "api", "api-types" ] }
+proxmox-subscription = { version = "0.5.0", features = [ "api-types" ] }
+proxmox-sys = "0.6.5"
+proxmox-systemd = "0.1"
+proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
 proxmox-time = "2"
 proxmox-uuid = "1"
+proxmox-worker-task = "0.1"
+pbs-api-types = "0.2.0"

 # other proxmox crates
 pathpatterns = "0.3"
-proxmox-acme = "0.5"
-pxar = "0.12"
+proxmox-acme = "0.5.3"
+pxar = "0.12.1"

 # PBS workspace
-pbs-api-types = { path = "pbs-api-types" }
 pbs-buildcfg = { path = "pbs-buildcfg" }
 pbs-client = { path = "pbs-client" }
 pbs-config = { path = "pbs-config" }
@@ -112,16 +120,14 @@ crc32fast = "1"
 const_format = "0.2"
 crossbeam-channel = "0.5"
 endian_trait = { version = "0.6", features = ["arrays"] }
-env_logger = "0.10"
+env_logger = "0.11"
 flate2 = "1.0"
 foreign-types = "0.3"
 futures = "0.3"
-h2 = { version = "0.3", features = [ "stream" ] }
+h2 = { version = "0.4", features = [ "legacy", "stream" ] }
 handlebars = "3.0"
 hex = "0.4.3"
 http = "0.2"
-hyper = { version = "0.14", features = [ "full" ] }
-lazy_static = "1.4"
+hyper = { version = "0.14", features = [ "backports", "deprecated", "full" ] }
 libc = "0.2"
 log = "0.4.17"
 nix = "0.26.1"
@@ -145,33 +151,29 @@ tokio = "1.6"
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
 tokio-util = { version = "0.7", features = [ "io" ] }
+tracing = "0.1"
 tower-service = "0.3.0"
 udev = "0.4"
 url = "2.1"
 walkdir = "2"
 xdg = "2.2"
 zstd = { version = "0.12", features = [ "bindgen" ] }
 zstd-safe = "6.0"

 [dependencies]
 anyhow.workspace = true
 async-trait.workspace = true
-apt-pkg-native.workspace = true
 base64.workspace = true
 bitflags.workspace = true
 bytes.workspace = true
 cidr.workspace = true
 const_format.workspace = true
 crc32fast.workspace = true
 crossbeam-channel.workspace = true
 endian_trait.workspace = true
 flate2.workspace = true
 futures.workspace = true
 h2.workspace = true
 handlebars.workspace = true
 hex.workspace = true
 http.workspace = true
 hyper.workspace = true
-lazy_static.workspace = true
 libc.workspace = true
 log.workspace = true
 nix.workspace = true
@@ -184,7 +186,6 @@ regex.workspace = true
 rustyline.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 siphasher.workspace = true
-syslog.workspace = true
 termcolor.workspace = true
 thiserror.workspace = true
@@ -192,24 +193,27 @@ tokio = { workspace = true, features = [ "fs", "io-util", "io-std", "macros", "n
 tokio-openssl.workspace = true
 tokio-stream.workspace = true
 tokio-util = { workspace = true, features = [ "codec" ] }
 tower-service.workspace = true
+tracing.workspace = true
 udev.workspace = true
 url.workspace = true
 walkdir.workspace = true
 xdg.workspace = true
 zstd.workspace = true

 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }

 # proxmox workspace
 proxmox-apt.workspace = true
+proxmox-apt-api-types.workspace = true
 proxmox-async.workspace = true
 proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] }
 proxmox-compression.workspace = true
+proxmox-config-digest.workspace = true
+proxmox-daemon.workspace = true
 proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these
 proxmox-human-byte.workspace = true
 proxmox-io.workspace = true
 proxmox-lang.workspace = true
+proxmox-log.workspace = true
 proxmox-ldap.workspace = true
 proxmox-metrics.workspace = true
 proxmox-notify = { workspace = true, features = [ "pbs-context" ] }
@@ -219,21 +223,23 @@ proxmox-router = { workspace = true, features = [ "cli", "server"] }
 proxmox-schema = { workspace = true, features = [ "api-macro" ] }
 proxmox-section-config.workspace = true
 proxmox-serde = { workspace = true, features = [ "serde_json" ] }
+proxmox-shared-cache.workspace = true
 proxmox-shared-memory.workspace = true
 proxmox-sortable-macro.workspace = true
 proxmox-subscription.workspace = true
 proxmox-sys = { workspace = true, features = [ "timer" ] }
+proxmox-systemd.workspace = true
 proxmox-tfa.workspace = true
 proxmox-time.workspace = true
 proxmox-uuid.workspace = true
+proxmox-worker-task.workspace = true
+pbs-api-types.workspace = true

 # in their respective repo
 pathpatterns.workspace = true
 proxmox-acme.workspace = true
 pxar.workspace = true

 # proxmox-backup workspace/internal crates
-pbs-api-types.workspace = true
 pbs-buildcfg.workspace = true
 pbs-client.workspace = true
 pbs-config.workspace = true
@@ -242,21 +248,27 @@ pbs-key-config.workspace = true
 pbs-tape.workspace = true
 pbs-tools.workspace = true
 proxmox-rrd.workspace = true
+proxmox-rrd-api-types.workspace = true

 # Local path overrides
 # NOTE: You must run `cargo update` after changing this for it to take effect!
 [patch.crates-io]

+#pbs-api-types = { path = "../proxmox/pbs-api-types" }
+#proxmox-acme = { path = "../proxmox/proxmox-acme" }
 #proxmox-apt = { path = "../proxmox/proxmox-apt" }
+#proxmox-apt-api-types = { path = "../proxmox/proxmox-apt-api-types" }
 #proxmox-async = { path = "../proxmox/proxmox-async" }
 #proxmox-auth-api = { path = "../proxmox/proxmox-auth-api" }
 #proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
 #proxmox-compression = { path = "../proxmox/proxmox-compression" }
+#proxmox-config-digest = { path = "../proxmox/proxmox-config-digest" }
+#proxmox-daemon = { path = "../proxmox/proxmox-daemon" }
 #proxmox-fuse = { path = "../proxmox-fuse" }
 #proxmox-http = { path = "../proxmox/proxmox-http" }
 #proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" }
 #proxmox-io = { path = "../proxmox/proxmox-io" }
 #proxmox-lang = { path = "../proxmox/proxmox-lang" }
+#proxmox-log = { path = "../proxmox/proxmox-log" }
 #proxmox-ldap = { path = "../proxmox/proxmox-ldap" }
 #proxmox-metrics = { path = "../proxmox/proxmox-metrics" }
 #proxmox-notify = { path = "../proxmox/proxmox-notify" }
@@ -264,6 +276,7 @@ proxmox-rrd.workspace = true
 #proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" }
 #proxmox-router = { path = "../proxmox/proxmox-router" }
 #proxmox-rrd = { path = "../proxmox/proxmox-rrd" }
+#proxmox-rrd-api-types = { path = "../proxmox/proxmox-rrd-api-types" }
 #proxmox-schema = { path = "../proxmox/proxmox-schema" }
 #proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
 #proxmox-serde = { path = "../proxmox/proxmox-serde" }
@@ -271,11 +284,12 @@ proxmox-rrd.workspace = true
 #proxmox-sortable-macro = { path = "../proxmox/proxmox-sortable-macro" }
 #proxmox-subscription = { path = "../proxmox/proxmox-subscription" }
 #proxmox-sys = { path = "../proxmox/proxmox-sys" }
+#proxmox-systemd = { path = "../proxmox/proxmox-systemd" }
 #proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
 #proxmox-time = { path = "../proxmox/proxmox-time" }
 #proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
+#proxmox-worker-task = { path = "../proxmox/proxmox-worker-task" }

-#proxmox-acme = { path = "../proxmox/proxmox-acme" }
 #pathpatterns = {path = "../pathpatterns" }
 #pxar = { path = "../pxar" }
```
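The commented-out `[patch.crates-io]` entries above are the development hook: uncommenting an override makes Cargo build against a local checkout instead of the released crate. A minimal sketch of that workflow, assuming the sibling `../proxmox` checkout layout the comments use:

```console
# 1. uncomment the override for the crate being worked on, e.g.
#    proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" }
# 2. refresh the lock file -- per the NOTE above, the override only
#    takes effect after this step
cargo update
# 3. build as usual; Cargo now resolves the crate from the local path
cargo build
```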
*(file name lost in extraction; this hunk is from the "Build & Release Notes", i.e. the repository README)*

```diff
@@ -5,8 +5,11 @@ Build & Release Notes
 ``rustup`` Toolchain
 ====================

-We normally want to build with the ``rustc`` Debian package. To do that
-you can set the following ``rustup`` configuration:
+We normally want to build with the ``rustc`` Debian package (see below). If you
+still want to use ``rustup`` for other reasons (e.g. to easily switch between
+the official stable, beta, and nightly compilers), you should set the following
+``rustup`` configuration to use the Debian-provided ``rustc`` compiler
+by default:

   # rustup toolchain link system /usr
   # rustup default system
```
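With the toolchain linked as above, a quick sanity check that builds will actually pick up the Debian-provided compiler (assuming a stock install with `rustc` at `/usr/bin/rustc`):

```console
# should resolve to /usr/bin/rustc for the linked "system" toolchain
rustup which rustc
rustc --version
```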
**debian/changelog** (vendored, 399 lines changed)

```diff
@@ -1,3 +1,402 @@
+rust-proxmox-backup (3.3.4-1) bookworm; urgency=medium
+
+  * fix #6185: client/docs: explicitly mention archive name restrictions
+
+  * docs: using-the-installer: adapt to raised root password length requirement
+
+  * disks: wipe: replace dd with write_all_at for zeroing disk
+
+  * fix #5946: disks: wipe: ensure GPT header backup is wiped
+
+  * docs: fix hash collision probability comparison
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 13 Mar 2025 13:04:05 +0100
+
+rust-proxmox-backup (3.3.3-1) bookworm; urgency=medium
+
+  * api: datastore list: move checking if a datastore is mounted after we
+    ensured that the user may actually access it. While this had no effect
+    security-wise, it could significantly increase the cost of this API
+    endpoint in big setups with many datastores and many tenants that each
+    have access to only one, or a small set, of datastores.
+
+  * Revert "fix #5710: api: backup: stat known chunks on backup finish" due to
+    a big performance impact relative to what this is protecting against. We
+    will work out a more efficient fix for this issue in the future.
+
+  * prune simulator: show backup entries that are kept also in the flat list
+    of backups, not just in the calendar view.
+
+  * docs: improve the description for the garbage collection's cut-off time
+
+  * pxar extract: correctly honor the overwrite flag
+
+  * api: datastore: add missing log context for prune to avoid a case where
+    the worker state is unknown after it finished.
+
+  * docs: add synopsis and basic docs for prune job configuration
+
+  * backup verification: handle manifest update errors as non-fatal to avoid
+    that the job fails, as we want to continue with verifying the rest to
+    ensure we uncover as many potential problems as possible.
+
+  * fix #4408: docs: add 'disaster recovery' section for tapes
+
+  * fix #6069: prune simulator: correctly handle schedules that mix both a
+    range and a step size at once.
+
+  * client: pxar: fix a race condition where the backup upload stream can miss
+    an error from the create archive function, because the error state is only
+    set after the backup stream was already polled. This avoids an edge case
+    where a file-based backup was incorrectly marked as having succeeded while
+    there was an error.
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 11 Feb 2025 20:24:27 +0100
+
+rust-proxmox-backup (3.3.2-2) bookworm; urgency=medium
+
+  * file-restore: fix regression with the new blockdev method used to pass
+    disks of a backup to the isolated virtual machine.
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 10 Dec 2024 12:14:47 +0100
+
+rust-proxmox-backup (3.3.2-1) bookworm; urgency=medium
+
+  * pbs-client: remove `log` dependency and migrate to our common,
+    `tracing`-based, logging infrastructure. No semantic change intended.
+
+  * file restore: switch to more modern blockdev option for drives in QEMU
+    wrapper for the restore VM.
+
+  * pxar: client: fix missing file size check for metadata comparison
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 09 Dec 2024 10:37:32 +0100
+
+rust-proxmox-backup (3.3.1-1) bookworm; urgency=medium
+
+  * tree-wide: add missing O_CLOEXEC flags to `openat` calls to avoid passing
+    any open FD to new child processes, which can have undesired side effects
+    like keeping a lock open longer than it should be.
+
+  * cargo: update proxmox dependency of rest-server and sys crates to include
+    some fixes for open FDs and a fix for the active task worker tracking, as
+    on failing to update the index file the daemon did not finish the worker,
+    causing a reference count issue where an old daemon could keep running
+    forever.
+
+  * ui: check that store is set before trying to select anything in the
+    garbage collection (GC) job view.
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 03 Dec 2024 18:11:04 +0100
+
+rust-proxmox-backup (3.3.0-2) bookworm; urgency=medium
+
+  * tree-wide: fix various typos.
+
+  * ui: fix remove vanished tooltip to be valid for both sync directions.
+
+  * ui: mask unmounted datastores in datastore overview.
+
+  * server: push: fix supported api version check for minor version bump.
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 28 Nov 2024 13:03:03 +0100
+
+rust-proxmox-backup (3.3.0-1) bookworm; urgency=medium
+
+  * GC: add safety-check for nested datastore
+
+  * ui: make some more strings translatable
+
+  * docs: make sphinx ignore the environment cache to avoid missing synopsis
+    in some HTML output, like for example the "Command Syntax" appendix.
+
+  * docs: add note for why FAT is not supported as a backing file system for
+    datastores
+
+  * api: disks: directory: fail if mount unit already exists for a new file
+    system
+
+  * filter partitions without proper UUID in partition selector
+
+  * ui: version info: replace wrong hyphen separator with dot
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 27 Nov 2024 20:38:41 +0100
+
+rust-proxmox-backup (3.2.14-1) bookworm; urgency=medium
+
+  * pull-sync: do not interpret older missing snapshots as needs-resync
+
+  * api: directory: use relative path when creating removable datastore
+
+  * ui: prune keep input: actually clear value on clear trigger click
+
+  * ui: datastore edit: fix empty-text for path field
+
+  * sync: push: pass full error context when returning error to job
+
+  * api: mount removable datastore: only log an informational message if the
+    correct device is already mounted.
+
+  * api: sync: restrict edit permissions for the new push sync jobs to avoid
+    that a user is able to create or edit sync jobs in push direction, but not
+    able to see them.
+
+  * api: create datastore: fix checks to avoid that any datastore can contain
+    another one to better handle the case for the new removable datastores.
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 27 Nov 2024 14:42:56 +0100
+
+rust-proxmox-backup (3.2.13-1) bookworm; urgency=medium
+
+  * update pxar dependency to fix selective extraction with the newly
+    supported match patterns.
+
+  * reuse-datastore: avoid creating another prune job
+
+  * api: notification: add API routes for webhook targets
+
+  * management cli: add CLI for webhook targets
+
+  * ui: utils: enable webhook edit window
+
+  * ui: utils: add task description for mounting/unmounting
+
+  * ui: add onlineHelp for consent-banner option
+
+  * docs: client: fix example commands for client usage
+
+  * docs: explain some further caveats of the change detection modes
+
+  * ui: use same label for removable datastore created from disk
+
+  * api: maintenance: allow setting of maintenance mode if 'unmounting'
+
+  * docs: add more information for removable datastores
+
+  * ui: sync jobs: revert to single list for pull/push jobs, improve
+    distinction between push and pull jobs through other means.
+
+  * ui: sync jobs: change default sorting to 'store' -> 'direction' -> 'id'
+
+  * ui: sync jobs: add search filter-box
+
+  * config: sync: use same config section type `sync` for push and pull; note
+    that this breaks existing configurations and needs manual clean-up. As the
+    package versions never made it beyond test this is ignored: while it is
+    not really ideal, we never give guarantees for testing package versions,
+    and the maintenance burden with the old style would not be ideal either.
+
+  * api: removable datastores: require Sys.Modify permission on /system/disks
+
+  * ui: allow resetting unmounting maintenance
+
+  * datastore: re-phrase error message when datastore is unavailable
+
+  * client: backup writer: fix regression in progress output
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 26 Nov 2024 17:05:23 +0100
+
+rust-proxmox-backup (3.2.12-1) bookworm; urgency=medium
+
+  * fix #5853: client: pxar: exclude stale files on metadata/link read
+
+  * docs: fix wrong product name in certificate docs
+
+  * docs: explain the working principle of the change detection modes
+
+  * allow datastore creation in directory with lost+found directory
+
+  * fix #5801: manager: switch datastore update command to real API call to
+    avoid early cancellation of the task.
+
+  * server: push: consistently use remote over target for error messages and
+    various smaller improvements to related log messages.
+
+  * push: move log messages for removed snapshot/group
+
+  * fix #5710: api: backup: stat known chunks on backup finish to ensure any
+    problem/corruption is caught earlier.
+
+  * pxar: extract: make invalid ACLs non-fatal, but only log them; there's
+    nothing to win by failing the restore completely.
+
+  * server: push: log encountered empty backup groups during sync
+
+  * fix #3786: ui, api, cli: add resync-corrupt option to sync jobs
+
+  * docs: add security implications of prune and change detection mode
+
+  * fix #2996: client: backup restore: add support to pass match patterns for
+    a selective restore
+
+  * docs: add installation media preparation and installation wizard guides
+
+  * api: enforce minimum character limit of 8 on new passwords to follow
+    recent NIST recommendations.
+
+  * ui, api: support configuring a consent banner that is shown before login
+    to allow complying with some (government) policy frameworks.
+
+  * ui, api: add initial support for removable datastores, providing better
+    integration for datastores located on a non-permanently attached medium.
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 25 Nov 2024 22:52:11 +0100
+
+rust-proxmox-backup (3.2.11-1) bookworm; urgency=medium
+
+  * fix #3044: server: implement push support for sync operations
+
+  * push sync related refactors
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 21 Nov 2024 12:03:50 +0100
+
+rust-proxmox-backup (3.2.10-1) bookworm; urgency=medium
+
+  * api: disk list: do not fail but just log error on gathering smart data
+
+  * cargo: require proxmox-log 0.2.6 to reduce spamming the logs with the
+    whole worker task contents
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 19 Nov 2024 22:36:14 +0100
+
+rust-proxmox-backup (3.2.9-1) bookworm; urgency=medium
+
+  * client: catalog: fall back to metadata archives for dumping the catalog
+
+  * client: catalog shell: make the catalog optional and use the pxar accessor
+    for navigation if the catalog is not provided, as is the case, for
+    example, for split pxar archives.
+
+  * client: catalog shell: drop payload offset in `stat` output, as this is an
+    internal value that only helps with specific development debugging.
+
+  * sync: fix premature return in snapshot-skip filter logic to avoid that the
+    first snapshot newer than the last synced one gets unconditionally
+    included.
+
+  * fix #5861: ui: remove minimum required username length in dialog for
+    changing the owner of a backup group, as PBS has supported usernames
+    shorter than 4 characters for a while now.
+
+  * fix #5439: allow one to reuse an existing datastore on datastore creation
+
+  * ui: disallow datastores in the file system root; this is almost never what
+    users want, and they can still use the CLI for such an edge case.
+
+  * fix #5233: api: tape: add explicit required permissions for the move tape,
+    update tape and destroy tape endpoints, requiring Tape.Modify and
+    Tape.Write on the `/tape` ACL object path, respectively. This avoids
+    requiring the use of the root account for basic tape management.
+
+  * client: catalog shell: make the root element its own parent to avoid
+    navigating below the archive root, which makes no sense and just causes
+    odd glitches.
+
+  * api: disk management: avoid retrieving lsblk result twice when listing
+    disks; while it is not overly expensive, it certainly does not help
+    performance either.
+
+  * api: disk management: parallelize retrieving the output from smartctl
+    checks.
+
+  * fix #5600: pbs2to3: make check more flexible to allow one to run arbitrary
+    newer '-pve' kernels after upgrade
+
+  * client: pxar: perform match pattern check for exclusion only once
+
+  * client: pxar: add debug output for exclude pattern matches to more
+    conveniently debug possible issues.
+
+  * fix #5868: rest-server: handshake detection: avoid infinite loop on
+    connection abort
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 14 Nov 2024 16:10:10 +0100
+
+rust-proxmox-backup (3.2.8-1) bookworm; urgency=medium
+
+  * switch various log statements in worker tasks to the newer, more flexible
+    proxmox log crate. With this change, errors from task logs are now also
+    logged to the system log, increasing their visibility.
+
+  * datastore api: list snapshots: avoid calculating the protected attribute
+    twice per snapshot; this reduces the amount of file metadata requests.
+
+  * avoid re-calculating the backup snapshot path's date time component when
+    getting the full path, reducing calls to the relatively slow strftime
+    function from libc.
+
+  * fix #3699: client: prefer the XDG cache directory for temporary files with
+    a fallback to using /tmp, as before.
+
+  * sync job: improve log message for when syncing the root namespace.
+
+  * client: increase read buffer from 8 KiB to 4 MiB for raw image based
+    backups. This reduces the time spent polling between the reader, chunker
+    and uploader async tasks and thus can improve backup speed significantly,
+    especially on setups with fast network and storage.
+
+  * client benchmark: avoid unnecessary allocation in the AES benchmark,
+    causing artificial overhead. The benchmark AES results should now be more
+    in line with the hardware capability and what the PBS client could already
+    do. On our test system we saw an increase by a factor of 2.3 on this
+    specific benchmark.
+
+  * docs: add external metrics server page
+
+  * tfa: webauthn: serialize OriginUrl following RFC6454
+
+  * factor out apt and apt-repository handling into a new library crate for
+    re-use in other projects. There should be no functional change.
+
+  * fix various typos all over the place found using the rust based `typos`
+    tool.
+
+  * datastore: data blob compression: increase compression throughput by
+    switching away from a higher level zstd method to a lower level one, which
+    allows us to control the target buffer size directly and thus avoid some
+    allocation and syscall overhead. We saw the compression bandwidth increase
+    by a factor of 1.19 in our tests where both the source data and the target
+    datastore were located in a memory-backed tmpfs.
+
+  * daily-update: ensure notification system context is initialized.
+
+  * backup reader: derive if debug messages should be printed from the global
+    log level. This avoids printing some debug messages by default, e.g., the
+    "protocol upgrade done" message from sync jobs.
+
+  * ui: user view: disable 'Unlock TFA' button by default to improve UX if no
+    user is selected.
+
+  * manager cli: ensure the worker task finishes when triggering a reload of
+    the system network.
+
+  * fix #5622: backup client: properly handle rate and burst parameters.
+    Previously, passing any non-integer value, like `1mb`, was ignored.
+
+  * tape: read element status: ignore responses where the library specifies
+    that it will return a volume tag but then does not include that field in
+    the actual response. As both the primary and the alternative volume tag
+    are not required by PBS, this specific error can simply be downgraded to a
+    warning.
+
+  * pxar: dump archive: print entries to stdout instead of stderr
+
+  * sync jobs: various clean-ups and refactoring that should not result in any
+    semantic change.
+
+  * metric collection: put metrics in a cache with a 30 minute lifetime.
+
+  * api: add /status/metrics API to allow pull-based metric servers to gather
+    data directly.
+
+  * partial fix #5560: client: periodically show backup progress
+
+  * docs: add proxmox-backup.node.cfg man page
+
+  * docs: sync: explicitly mention `removed-vanish` flag
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 18 Oct 2024 19:05:41 +0200
+
 rust-proxmox-backup (3.2.7-1) bookworm; urgency=medium

   * docs: drop blanket statement recommending against remote storage
```
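One entry above worth illustrating is the 3.2.8 fix for rate and burst parameters (#5622), since it changes what the client silently accepted: human-readable byte values are now parsed instead of being ignored. A hedged sketch (archive, path, and repository names are placeholders):

```console
# before the fix, a non-integer value like "1mb" was silently ignored;
# now human-byte values are honored for traffic limiting
proxmox-backup-client backup root.pxar:/ \
    --repository root@pam@pbs.example.com:store1 \
    --rate 100MiB --burst 200MiB
```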
**debian/control** (vendored, 71 lines changed)

```diff
@@ -15,7 +15,6 @@ Build-Depends: bash-completion,
 libacl1-dev,
 libfuse3-dev,
 librust-anyhow-1+default-dev,
-librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
 librust-async-trait-0.1+default-dev (>= 0.1.56-~~),
 librust-base64-0.13+default-dev,
 librust-bitflags-2+default-dev (>= 2.4-~~),
@@ -26,19 +25,16 @@ Build-Depends: bash-completion,
 librust-crossbeam-channel-0.5+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
-librust-env-logger-0.10+default-dev,
 librust-flate2-1+default-dev,
+librust-env-logger-0.11+default-dev,
 librust-foreign-types-0.3+default-dev,
 librust-futures-0.3+default-dev,
-librust-h2-0.3+default-dev,
-librust-h2-0.3+stream-dev,
 librust-handlebars-3+default-dev,
+librust-h2-0.4+default-dev,
+librust-h2-0.4+stream-dev,
 librust-hex-0.4+default-dev (>= 0.4.3-~~),
 librust-hex-0.4+serde-dev (>= 0.4.3-~~),
 librust-http-0.2+default-dev,
 librust-hyper-0.14+default-dev,
 librust-hyper-0.14+full-dev,
-librust-lazy-static-1+default-dev (>= 1.4-~~),
 librust-libc-0.2+default-dev,
 librust-log-0.4+default-dev (>= 0.4.17-~~),
 librust-nix-0.26+default-dev (>= 0.26.1-~~),
@@ -47,10 +43,13 @@ Build-Depends: bash-completion,
 librust-once-cell-1+default-dev (>= 1.3.1-~~),
 librust-openssl-0.10+default-dev (>= 0.10.40-~~),
 librust-pathpatterns-0.3+default-dev,
+librust-pbs-api-types-0.2+default-dev,
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-lite-0.2+default-dev,
-librust-proxmox-acme-0.5+default-dev,
-librust-proxmox-apt-0.10+default-dev (>= 0.10.5-~~),
+librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
+librust-proxmox-apt-0.11+cache-dev,
+librust-proxmox-apt-0.11+default-dev,
+librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
 librust-proxmox-async-0.4+default-dev,
 librust-proxmox-auth-api-0.4+api-dev,
 librust-proxmox-auth-api-0.4+api-types-dev,
@@ -58,6 +57,8 @@ Build-Depends: bash-completion,
 librust-proxmox-auth-api-0.4+pam-authenticator-dev,
 librust-proxmox-borrow-1+default-dev,
 librust-proxmox-compression-0.2+default-dev,
+librust-proxmox-config-digest-0.1+default-dev,
+librust-proxmox-daemon-0.1+default-dev,
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
 librust-proxmox-http-0.9+client-dev,
 librust-proxmox-http-0.9+client-trait-dev,
@@ -72,44 +73,48 @@ Build-Depends: bash-completion,
 librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
 librust-proxmox-lang-1+default-dev (>= 1.1-~~),
 librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~),
+librust-proxmox-log-0.2+default-dev (>= 0.2.6-~~),
 librust-proxmox-metrics-0.3+default-dev (>= 0.3.1-~~),
-librust-proxmox-notify-0.4+default-dev,
-librust-proxmox-notify-0.4+pbs-context-dev,
+librust-proxmox-notify-0.5+default-dev (>= 0.5.1-~~),
+librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1-~~),
 librust-proxmox-openid-0.10+default-dev,
-librust-proxmox-rest-server-0.5+default-dev (>= 0.5.1-~~),
-librust-proxmox-rest-server-0.5+rate-limited-stream-dev (>= 0.5.1-~~),
-librust-proxmox-rest-server-0.5+templates-dev (>= 0.5.1-~~),
-librust-proxmox-router-2+cli-dev,
-librust-proxmox-router-2+server-dev,
-librust-proxmox-rrd-0.2+default-dev,
-librust-proxmox-schema-3+api-macro-dev,
-librust-proxmox-schema-3+default-dev,
+librust-proxmox-rest-server-0.8+default-dev (>= 0.8.5-~~),
+librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.5-~~),
+librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.5-~~),
+librust-proxmox-router-3+cli-dev,
+librust-proxmox-router-3+server-dev,
+librust-proxmox-rrd-0.4+default-dev,
+librust-proxmox-rrd-api-types-1+default-dev (>= 1.0.2-~~),
+librust-proxmox-schema-4+api-macro-dev,
+librust-proxmox-schema-4+default-dev,
 librust-proxmox-section-config-2+default-dev,
 librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
 librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
+librust-proxmox-shared-cache-0.1+default-dev,
 librust-proxmox-shared-memory-0.3+default-dev,
 librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
-librust-proxmox-subscription-0.4+api-types-dev (>= 0.4.2-~~),
-librust-proxmox-subscription-0.4+default-dev (>= 0.4.2-~~),
-librust-proxmox-sys-0.5+acl-dev (>= 0.5.7-~~),
-librust-proxmox-sys-0.5+crypt-dev (>= 0.5.7-~~),
-librust-proxmox-sys-0.5+default-dev (>= 0.5.7-~~),
-librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.7-~~),
-librust-proxmox-sys-0.5+timer-dev (>= 0.5.7-~~),
-librust-proxmox-tfa-4+api-dev (>= 4.0.4-~~),
-librust-proxmox-tfa-4+api-types-dev (>= 4.0.4-~~),
-librust-proxmox-tfa-4+default-dev (>= 4.0.4-~~),
+librust-proxmox-subscription-0.5+api-types-dev,
+librust-proxmox-subscription-0.5+default-dev,
+librust-proxmox-sys-0.6+acl-dev (>= 0.6.5-~~),
+librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
+librust-proxmox-sys-0.6+default-dev (>= 0.6.5-~~),
+librust-proxmox-sys-0.6+logrotate-dev (>= 0.6.5-~~),
+librust-proxmox-sys-0.6+timer-dev (>= 0.6.5-~~),
+librust-proxmox-systemd-0.1+default-dev,
+librust-proxmox-tfa-5+api-dev,
+librust-proxmox-tfa-5+api-types-dev,
+librust-proxmox-tfa-5+default-dev,
 librust-proxmox-time-2+default-dev,
 librust-proxmox-uuid-1+default-dev,
 librust-proxmox-uuid-1+serde-dev,
-librust-pxar-0.12+default-dev,
+librust-proxmox-worker-task-0.1+default-dev,
+librust-pxar-0.12+default-dev (>= 0.12.1-~~),
 librust-regex-1+default-dev (>= 1.5.5-~~),
 librust-rustyline-9+default-dev,
 librust-serde-1+default-dev,
 librust-serde-1+derive-dev,
 librust-serde-json-1+default-dev,
 librust-serde-plain-1+default-dev,
 librust-siphasher-0.3+default-dev,
-librust-syslog-6+default-dev,
 librust-tar-0.4+default-dev,
 librust-termcolor-1+default-dev (>= 1.1.2-~~),
@@ -133,12 +138,14 @@ Build-Depends: bash-completion,
 librust-tokio-util-0.7+default-dev,
 librust-tokio-util-0.7+io-dev,
 librust-tower-service-0.3+default-dev,
+librust-tracing-0.1+default-dev,
 librust-udev-0.4+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),
 librust-walkdir-2+default-dev,
 librust-xdg-2+default-dev (>= 2.2-~~),
 librust-zstd-0.12+bindgen-dev,
 librust-zstd-0.12+default-dev,
+librust-zstd-safe-6+default-dev,
 libsgutils2-dev,
 libstd-rust-dev,
 libsystemd-dev (>= 246-~~),
@@ -177,7 +184,7 @@ Depends: fonts-font-awesome,
 postfix | mail-transport-agent,
 proxmox-backup-docs,
 proxmox-mini-journalreader,
-proxmox-widget-toolkit (>= 4.1.4),
+proxmox-widget-toolkit (>= 4.3.3),
 pve-xtermjs (>= 4.7.0-1),
 sg3-utils,
 smartmontools,
```
**debian/copyright** (vendored, 2 lines changed)

```diff
@@ -1,4 +1,4 @@
-Copyright (C) 2019 - 2024 Proxmox Server Solutions GmbH
+Copyright (C) 2019 - 2025 Proxmox Server Solutions GmbH

 This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
```
**debian/proxmox-backup-file-restore.postinst** (vendored, 2 lines changed)

```diff
@@ -9,7 +9,7 @@ update_initramfs() {
     CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img"

     # cleanup first, in case proxmox-file-restore was uninstalled since we do
-    # not want an unuseable image lying around
+    # not want an unusable image lying around
     rm -f "$CACHE_PATH"

     if [ ! -f "$INST_PATH/initramfs.img" ]; then
```
**debian/proxmox-backup-server.install** (vendored, 25 lines changed)

```diff
@@ -4,6 +4,7 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/
 etc/proxmox-backup-daily-update.timer /lib/systemd/system/
 etc/proxmox-backup-proxy.service /lib/systemd/system/
 etc/proxmox-backup.service /lib/systemd/system/
+etc/removable-device-attach@.service /lib/systemd/system/
 usr/bin/pmt
 usr/bin/pmtx
 usr/bin/proxmox-tape
@@ -30,34 +31,31 @@ usr/share/man/man5/acl.cfg.5
 usr/share/man/man5/datastore.cfg.5
 usr/share/man/man5/domains.cfg.5
 usr/share/man/man5/media-pool.cfg.5
-usr/share/man/man5/notifications.cfg.5
 usr/share/man/man5/notifications-priv.cfg.5
+usr/share/man/man5/notifications.cfg.5
+usr/share/man/man5/proxmox-backup.node.cfg.5
 usr/share/man/man5/remote.cfg.5
 usr/share/man/man5/sync.cfg.5
 usr/share/man/man5/tape-job.cfg.5
 usr/share/man/man5/tape.cfg.5
 usr/share/man/man5/user.cfg.5
 usr/share/man/man5/verification.cfg.5
-usr/share/zsh/vendor-completions/_pmt
-usr/share/zsh/vendor-completions/_pmtx
-usr/share/zsh/vendor-completions/_proxmox-backup-debug
-usr/share/zsh/vendor-completions/_proxmox-backup-manager
-usr/share/zsh/vendor-completions/_proxmox-tape
+usr/share/man/man5/prune.cfg.5
 usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
 usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
-usr/share/proxmox-backup/templates/default/gc-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/gc-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/gc-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/gc-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/package-updates-body.txt.hbs
 usr/share/proxmox-backup/templates/default/package-updates-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/prune-err-body.txt.hbs
-usr/share/proxmox-backup/templates/default/prune-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/prune-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/prune-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/prune-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/sync-err-body.txt.hbs
-usr/share/proxmox-backup/templates/default/sync-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/sync-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/sync-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/sync-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-backup-err-body.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-backup-err-subject.txt.hbs
@@ -65,10 +63,15 @@ usr/share/proxmox-backup/templates/default/tape-backup-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
-usr/share/proxmox-backup/templates/default/test-body.txt.hbs
+usr/share/proxmox-backup/templates/default/test-body.html.hbs
+usr/share/proxmox-backup/templates/default/test-body.txt.hbs
 usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs
-usr/share/proxmox-backup/templates/default/verify-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/verify-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/verify-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/verify-ok-subject.txt.hbs
+usr/share/zsh/vendor-completions/_pmt
+usr/share/zsh/vendor-completions/_pmtx
+usr/share/zsh/vendor-completions/_proxmox-backup-debug
+usr/share/zsh/vendor-completions/_proxmox-backup-manager
+usr/share/zsh/vendor-completions/_proxmox-tape
```
**debian/proxmox-backup-server.udev** (vendored, 3 lines changed)

```diff
@@ -16,3 +16,6 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER
   SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg"

 LABEL="persistent_storage_tape_end"
+
+# triggers the mounting of a removable device
+ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}"
```
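The added rule tags any block device that exposes a filesystem UUID so that systemd pulls in the matching instance of the `removable-device-attach@.service` template shipped by the install file above. A sketch for inspecting that mechanism on a live system (device path and UUID are placeholders):

```console
# show the properties udev derives for a partition; the rule keys on ID_FS_UUID
udevadm info --query=property /dev/sdb1 | grep -E '^(ID_FS_UUID|SYSTEMD_WANTS)='
# the instantiated unit systemd starts for that UUID
systemctl status 'removable-device-attach@AAAA-BBBB.service'
```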
**debian/rules** (vendored, 5 lines changed)

```diff
@@ -28,6 +28,11 @@ override_dh_auto_configure:
 	@perl -ne 'if (/^version\s*=\s*"(\d+(?:\.\d+)+)"/) { my $$v_cargo = $$1; my $$v_deb = "$(DEB_VERSION_UPSTREAM)"; \
 	    die "ERROR: d/changelog <-> Cargo.toml version mismatch: $$v_cargo != $$v_deb\n" if $$v_cargo ne $$v_deb; exit(0); }' Cargo.toml
 	$(CARGO) prepare-debian $(CURDIR)/debian/cargo_registry --link-from-system
+	# `cargo build` and `cargo install` have different config precedence, symlink
+	# the wrapper config into a place where `build` picks it up as well..
+	# https://doc.rust-lang.org/cargo/commands/cargo-install.html#configuration-discovery
+	mkdir -p .cargo
+	ln -s $(CARGO_HOME)/config.toml $(CURDIR)/.cargo/config.toml
 	dh_auto_configure

 override_dh_auto_build:
```
*(file name lost in extraction; the content matches `docs/Makefile`)*

```diff
@@ -1,59 +1,65 @@
 include ../defines.mk

 GENERATED_SYNOPSIS := \
-	proxmox-tape/synopsis.rst \
-	proxmox-backup-client/synopsis.rst \
-	proxmox-backup-client/catalog-shell-synopsis.rst \
-	proxmox-backup-manager/synopsis.rst \
-	proxmox-backup-debug/synopsis.rst \
-	proxmox-file-restore/synopsis.rst \
-	pxar/synopsis.rst \
-	pmtx/synopsis.rst \
-	pmt/synopsis.rst \
-	config/media-pool/config.rst \
-	config/notifications/config.rst \
-	config/notifications-priv/config.rst \
-	config/tape/config.rst \
-	config/tape-job/config.rst \
-	config/user/config.rst \
-	config/remote/config.rst \
-	config/sync/config.rst \
-	config/verification/config.rst \
 	config/acl/roles.rst \
 	config/datastore/config.rst \
-	config/domains/config.rst
+	config/domains/config.rst \
+	config/media-pool/config.rst \
+	config/notifications-priv/config.rst \
+	config/notifications/config.rst \
+	config/remote/config.rst \
+	config/sync/config.rst \
+	config/tape-job/config.rst \
+	config/tape/config.rst \
+	config/user/config.rst \
+	config/verification/config.rst \
+	config/prune/config.rst \
+	pmt/synopsis.rst \
+	pmtx/synopsis.rst \
+	proxmox-backup-client/catalog-shell-synopsis.rst \
+	proxmox-backup-client/synopsis.rst \
+	proxmox-backup-debug/synopsis.rst \
+	proxmox-backup-manager/synopsis.rst \
+	proxmox-file-restore/synopsis.rst \
+	proxmox-tape/synopsis.rst \
+	pxar/synopsis.rst \

 MAN1_PAGES := \
-	pxar.1 \
-	pmtx.1 \
-	pmt.1 \
-	proxmox-tape.1 \
-	proxmox-backup-proxy.1 \
-	proxmox-backup-client.1 \
-	proxmox-backup-manager.1 \
-	proxmox-file-restore.1 \
-	proxmox-backup-debug.1 \
 	pbs2to3.1 \
+	pmt.1 \
+	pmtx.1 \
+	proxmox-backup-client.1 \
+	proxmox-backup-debug.1 \
+	proxmox-backup-manager.1 \
+	proxmox-backup-proxy.1 \
+	proxmox-file-restore.1 \
+	proxmox-tape.1 \
+	pxar.1 \

+# FIXME: prefix all man pages that are not directly relating to an existing executable with
+# `proxmox-backup.`, like the newer added proxmox-backup.node.cfg but add backwards compatible
+# symlinks, e.g. with a "5pbs" man page "suffix section".
 MAN5_PAGES := \
-	media-pool.cfg.5 \
-	tape.cfg.5 \
-	tape-job.cfg.5 \
 	acl.cfg.5 \
-	user.cfg.5 \
-	remote.cfg.5 \
-	sync.cfg.5 \
-	verification.cfg.5 \
 	datastore.cfg.5 \
 	domains.cfg.5 \
-	notifications.cfg.5 \
+	media-pool.cfg.5 \
+	proxmox-backup.node.cfg.5 \
 	notifications-priv.cfg.5 \
+	notifications.cfg.5 \
+	remote.cfg.5 \
+	sync.cfg.5 \
+	tape-job.cfg.5 \
+	tape.cfg.5 \
+	user.cfg.5 \
+	verification.cfg.5 \
+	prune.cfg.5 \

 PRUNE_SIMULATOR_FILES := \
 	prune-simulator/index.html \
-	prune-simulator/documentation.html \
 	prune-simulator/clear-trigger.png \
-	prune-simulator/prune-simulator.js
+	prune-simulator/documentation.html \
+	prune-simulator/prune-simulator.js \

 PRUNE_SIMULATOR_JS_SOURCE := \
 	/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
@@ -85,7 +91,7 @@ API_VIEWER_FILES := \
 	/usr/share/javascript/proxmox-widget-toolkit-dev/APIViewer.js \

 # Sphinx documentation setup
-SPHINXOPTS   =
+SPHINXOPTS   = -E
 SPHINXBUILD  = sphinx-build
 BUILDDIR     = output
```
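The one behavioral change in this Makefile is `SPHINXOPTS = -E`: the `-E` flag makes `sphinx-build` discard its cached doctree environment and re-read all source files, which is what the changelog cites as the fix for synopses missing from some HTML output. The equivalent manual invocation, using the Makefile's `BUILDDIR = output`:

```console
# force a fresh environment instead of the incremental cache
sphinx-build -E -b html . output/html
```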
*(file name lost in extraction; the content matches the client usage documentation, e.g. `docs/backup-client.rst`)*

```diff
@@ -169,6 +169,7 @@ the client. The format is:

     <archive-name>.<type>:<source-path>

+The ``archive-name`` must contain alphanumerics, hyphens and underscores only.
 Common types are ``.pxar`` for file archives and ``.img`` for block
 device images. To create a backup of a block device, run the following command:
```
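The command itself falls outside the hunk shown; a sketch that follows the documented `<archive-name>.<type>:<source-path>` format, with hypothetical device and repository names:

```console
# ".img" selects a block device image archive; the archive name may only
# contain alphanumerics, hyphens and underscores
proxmox-backup-client backup disk-backup.img:/dev/sdX \
    --repository root@pam@pbs.example.com:store1
```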
```diff
@@ -272,13 +273,13 @@ parameter. For example:

 .. code-block:: console

-    # proxmox-backup-client backup.pxar:./linux --exclude /usr
+    # proxmox-backup-client backup archive-name.pxar:./linux --exclude /usr

 Multiple paths can be excluded like this:

 .. code-block:: console

-    # proxmox-backup-client backup.pxar:./linux --exclude=/usr --exclude=/rust
+    # proxmox-backup-client backup archive-name.pxar:./linux --exclude=/usr --exclude=/rust

 .. _client_change_detection_mode:
```
```diff
@@ -295,30 +296,22 @@ therefore deduplicated). If the backed up files are largely unchanged,
 re-reading and then detecting the corresponding chunks don't need to be uploaded
 after all is time consuming and undesired.

-The backup client's `change-detection-mode` can be switched from default to
-`metadata` based detection to reduce limitations as described above, instructing
-the client to avoid re-reading files with unchanged metadata whenever possible.
+The backup client's ``change-detection-mode`` can be switched from default to
+``metadata`` based detection to reduce limitations as described above,
+instructing the client to avoid re-reading files with unchanged metadata
+whenever possible.
 When using this mode, instead of the regular pxar archive, the backup snapshot
-is stored into two separate files: the `mpxar` containing the archive's metadata
-and the `ppxar` containing a concatenation of the file contents. This splitting
-allows for efficient metadata lookups.
+is stored into two separate files: the ``mpxar`` containing the archive's
+metadata and the ``ppxar`` containing a concatenation of the file contents. This
+splitting allows for efficient metadata lookups. When creating the backup
+archives, the current file metadata is compared to the one looked up in the
+previous ``mpxar`` archive. The operational details are explained more in depth
+in the :ref:`technical documentation <change-detection-mode-metadata>`.

-Using the `change-detection-mode` set to `data` allows to create the same split
-archive as when using the `metadata` mode, but without using a previous
-reference and therefore reencoding all file payloads.
-When creating the backup archives, the current file metadata is compared to the
-one looked up in the previous `mpxar` archive.
-The metadata comparison includes file size, file type, ownership and permission
-information, as well as acls and attributes and most importantly the file's
-mtime, for details see the
-:ref:`pxar metadata archive format <pxar-meta-format>`.
-
-If unchanged, the entry is cached for possible re-use of content chunks without
-re-reading, by indexing the already present chunks containing the contents from
-the previous backup snapshot. Since the file might only partially re-use chunks
-(thereby introducing wasted space in the form of padding), the decision whether
-to re-use or re-encode the currently cached entries is postponed to when enough
-information is available, comparing the possible padding to a threshold value.
+Using the ``change-detection-mode`` set to ``data`` allows to create the same
+split archive as when using the ``metadata`` mode, but without using a previous
+reference and therefore reencoding all file payloads. For details of this mode
+please see the :ref:`technical documentation <change-detection-mode-data>`.

 .. _client_change_detection_mode_table:
```
@ -337,7 +330,7 @@ mode:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-client backup.pxar:./linux --change-detection-mode=metadata
|
||||
# proxmox-backup-client backup archive-name.pxar:./linux --change-detection-mode=metadata
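The same flag also accepts the ``data`` mode described above; as a sketch, using
the same illustrative archive name and source path:

.. code-block:: console

  # proxmox-backup-client backup archive-name.pxar:./linux --change-detection-mode=data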

.. _client_encryption:

@@ -478,6 +471,8 @@ version of your master key. The following command sends the output of the

  proxmox-backup-client key paperkey --output-format text > qrkey.txt

.. _client_restoring_data:

Restoring Data
--------------

@@ -789,29 +784,25 @@ Garbage Collection
------------------

The ``prune`` command removes only the backup index files, not the data
from the datastore. This task is left to the garbage collection
command. It is recommended to carry out garbage collection on a regular basis.
from the datastore. Deletion of unused backup data from the datastore is done by
:ref:`garbage collection <maintenance_gc>`. It is therefore recommended to
schedule garbage collection tasks on a regular basis. The working principle of
garbage collection is described in more detail in the related :ref:`background
section <gc_background>`.

The garbage collection works in two phases. In the first phase, all
data blocks that are still in use are marked. In the second phase,
unused data blocks are removed.
To start garbage collection from the client side, run the following command:

.. code-block:: console

  # proxmox-backup-client garbage-collect

.. note:: This command needs to read all existing backup index files
   and touches the complete chunk-store. This can take a long time
   depending on the number of chunks and the speed of the underlying
   disks.

.. note:: The garbage collection will only remove chunks that haven't been used
   for at least one day (exactly 24h 5m). This grace period is necessary because
   chunks in use are marked by touching the chunk which updates the ``atime``
   (access time) property. Filesystems are mounted with the ``relatime`` option
   by default. This results in a better performance by only updating the
   ``atime`` property if the last access has been at least 24 hours ago. The
   downside is that touching a chunk within these 24 hours will not always
   update its ``atime`` property.

   Chunks in the grace period will be logged at the end of the garbage
   collection task as *Pending removals*.
The progress of the garbage collection will be displayed as shown in the example
below:

.. code-block:: console
@@ -44,10 +44,8 @@ web-interface/API or using the ``proxmox-backup-manager`` CLI tool.
Upload Custom Certificate
~~~~~~~~~~~~~~~~~~~~~~~~~

If you already have a certificate which you want to use for a Proxmox
Mail Gateway host, you can simply upload that certificate over the web
interface.

If you already have a certificate which you want to use for a `Proxmox Backup`_
host, you can simply upload that certificate over the web interface.

.. image:: images/screenshots/pbs-gui-certs-upload-custom.png
  :target: _images/pbs-gui-certs-upload-custom.png
@@ -71,7 +71,7 @@ master_doc = 'index'

# General information about the project.
project = 'Proxmox Backup'
copyright = '2019-2023, Proxmox Server Solutions GmbH'
copyright = '2019-2025, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team'

# The version info for the project you're documenting acts as a replacement for
@@ -108,12 +108,14 @@ man_pages = [
    ('config/datastore/man5', 'datastore.cfg', 'Datastore Configuration', [author], 5),
    ('config/domains/man5', 'domains.cfg', 'Realm Configuration', [author], 5),
    ('config/media-pool/man5', 'media-pool.cfg', 'Media Pool Configuration', [author], 5),
    ('config/node/man5', 'proxmox-backup.node.cfg', 'Proxmox Backup Server - Node Configuration', [author], 5),
    ('config/remote/man5', 'remote.cfg', 'Remote Server Configuration', [author], 5),
    ('config/sync/man5', 'sync.cfg', 'Synchronization Job Configuration', [author], 5),
    ('config/tape-job/man5', 'tape-job.cfg', 'Tape Job Configuration', [author], 5),
    ('config/tape/man5', 'tape.cfg', 'Tape Drive and Changer Configuration', [author], 5),
    ('config/user/man5', 'user.cfg', 'User Configuration', [author], 5),
    ('config/verification/man5', 'verification.cfg', 'Verification Job Configuration', [author], 5),
    ('config/prune/man5', 'prune.cfg', 'Prune Job Configuration', [author], 5),
    ('config/notifications/man5', 'notifications.cfg', 'Notification target/matcher configuration', [author], 5),
    ('config/notifications-priv/man5', 'notifications-priv.cfg', 'Notification target secrets', [author], 5),
]
docs/config/node/format.rst (new file)
@@ -0,0 +1,49 @@
The file contains these options:

:acme: The ACME account to use on this node.

:acmedomain0: ACME domain.

:acmedomain1: ACME domain.

:acmedomain2: ACME domain.

:acmedomain3: ACME domain.

:acmedomain4: ACME domain.

:http-proxy: Set proxy for apt and subscription checks.

:email-from: Fallback email from which notifications will be sent.

:ciphers-tls-1.3: List of TLS ciphers for TLS 1.3 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)

:ciphers-tls-1.2: List of TLS ciphers for TLS <= 1.2 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)

:default-lang: Default language used in the GUI.

:description: Node description.

:task-log-max-days: Maximum days to keep task logs.

For example:

::

  acme: local
  acmedomain0: first.domain.com
  acmedomain1: second.domain.com
  acmedomain2: third.domain.com
  acmedomain3: fourth.domain.com
  acmedomain4: fifth.domain.com
  http-proxy: internal.proxy.com
  email-from: proxmox@mail.com
  ciphers-tls-1.3: TLS_AES_128_GCM_SHA256:TLS_AES_128_CCM_8_SHA256:TLS_CHACHA20_POLY1305_SHA256
  ciphers-tls-1.2: RSA_WITH_AES_128_CCM:DHE_RSA_WITH_AES_128_CCM
  default-lang: en
  description: Primary PBS instance
  task-log-max-days: 30


You can use the ``proxmox-backup-manager node`` command to manipulate
this file.
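As a sketch (subcommand names assumed to follow the usual
``proxmox-backup-manager`` pattern; the option value is illustrative), the node
options could be inspected and changed like this:

.. code-block:: console

  # proxmox-backup-manager node show
  # proxmox-backup-manager node update --task-log-max-days 30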
docs/config/node/man5.rst (new file)
@@ -0,0 +1,18 @@
:orphan:

========
node.cfg
========

Description
===========

The file /etc/proxmox-backup/node.cfg is a configuration file for Proxmox
Backup Server. It contains the general configuration regarding this node.

Options
=======

.. include:: format.rst

.. include:: ../../pbs-copyright.rst
@@ -8,7 +8,7 @@ Description
===========

The file /etc/proxmox-backup/notifications-priv.cfg is a configuration file
for Proxmox Backup Server. It contains the configration for the
for Proxmox Backup Server. It contains the configuration for the
notification system configuration.

File Format

@@ -8,7 +8,7 @@ Description
===========

The file /etc/proxmox-backup/notifications.cfg is a configuration file
for Proxmox Backup Server. It contains the configration for the
for Proxmox Backup Server. It contains the configuration for the
notification system configuration.

File Format
docs/config/prune/format.rst (new file)
@@ -0,0 +1,14 @@
Each entry starts with the header ``prune: <name>``, followed by the job
configuration options.

::

  prune: prune-store2
  	schedule mon..fri 10:30
  	store my-datastore

  prune: ...


You can use the ``proxmox-backup-manager prune-job`` command to manipulate this
file.
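For illustration (job ID and option values hypothetical, mirroring the config
keys above), such a job might be created and listed via:

.. code-block:: console

  # proxmox-backup-manager prune-job create prune-store2 --store my-datastore \
      --schedule 'mon..fri 10:30' --keep-last 7
  # proxmox-backup-manager prune-job list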
docs/config/prune/man5.rst (new file)
@@ -0,0 +1,23 @@
:orphan:

=========
prune.cfg
=========

Description
===========

The file /etc/proxmox-backup/prune.cfg is a configuration file for Proxmox
Backup Server. It contains the prune job configuration.

File Format
===========

.. include:: format.rst

Options
=======

.. include:: config.rst

.. include:: ../../pbs-copyright.rst
@@ -7,8 +7,8 @@ verification.cfg
Description
===========

The file /etc/proxmox-backup/sync.cfg is a configuration file for Proxmox
Backup Server. It contains the verification job configuration.
The file /etc/proxmox-backup/verification.cfg is a configuration file for
Proxmox Backup Server. It contains the verification job configuration.

File Format
===========
@@ -67,6 +67,14 @@ Options

.. include:: config/media-pool/config.rst

``node.cfg``
~~~~~~~~~~~~

Options
^^^^^^^

.. include:: config/node/format.rst

.. _notifications.cfg:

``notifications.cfg``
@@ -100,6 +108,21 @@ Options
.. include:: config/notifications-priv/config.rst


``prune.cfg``
~~~~~~~~~~~~~

File Format
^^^^^^^^^^^

.. include:: config/prune/format.rst


Options
^^^^^^^

.. include:: config/prune/config.rst


``tape.cfg``
~~~~~~~~~~~~
docs/external-metric-server.rst (new file)
@@ -0,0 +1,55 @@
External Metric Server
----------------------

Proxmox Backup Server periodically sends various metrics about your host's memory,
network and disk activity to configured external metric servers.

Currently supported are:

* InfluxDB (HTTP) (see https://docs.influxdata.com/influxdb/v2/ )
* InfluxDB (UDP) (see https://docs.influxdata.com/influxdb/v1/ )

The external metric server definitions are saved in
'/etc/proxmox-backup/metricserver.cfg', and can be edited through the web
interface.

.. note::

   Using HTTP is recommended as UDP support has been dropped in InfluxDB v2.

InfluxDB (HTTP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The plugin can be configured to use the HTTP(s) API of InfluxDB 2.x.
InfluxDB 1.8.x does contain a forwards compatible API endpoint for this v2 API.

Since InfluxDB's v2 API is only available with authentication, you have
to generate a token that can write into the correct bucket and set it.

In the v2 compatible API of 1.8.x, you can use 'user:password' as token
(if required), and can omit the 'organization' since that has no meaning in InfluxDB 1.x.

You can also set the maximum batch size (default 25000000 bytes) with the
'max-body-size' setting (this corresponds to the InfluxDB setting with the
same name).

InfluxDB (UDP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Proxmox Backup Server can also send data via UDP. This requires the InfluxDB
server to be configured correctly. The MTU can also be configured here if
necessary.

Here is an example configuration for InfluxDB (on your InfluxDB server):

.. code-block:: console

  [[udp]]
    enabled = true
    bind-address = "0.0.0.0:8089"
    database = "proxmox"
    batch-size = 1000
    batch-timeout = "1s"

With this configuration, the InfluxDB server listens on all IP addresses on
port 8089, and writes the data in the *proxmox* database.
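To sanity-check that such a UDP listener is reachable, something like the
following can be used from a shell (the host name is hypothetical, and the
payload is just a minimal InfluxDB line-protocol sample, not what Proxmox
Backup Server itself sends):

.. code-block:: console

  # echo "test,host=pbs value=1" | nc -u -w1 influxdb.example.com 8089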
docs/gui.rst
@@ -40,6 +40,16 @@ Proxmox Backup Server supports various languages and authentication back ends
.. note:: For convenience, you can save the username on the client side, by
   selecting the "Save User name" checkbox at the bottom of the window.

.. _consent_banner:

Consent Banner
^^^^^^^^^^^^^^

A custom consent banner that has to be accepted before login can be configured
in **Configuration -> Other -> General -> Consent Text**. If there is no
content, the consent banner will not be displayed. The text will be stored as a
base64 string in the ``/etc/proxmox-backup/node.cfg`` config file.
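For illustration, the stored value corresponds to a plain base64 encoding of
the banner text, so the string ending up in the config could be produced like
this (banner text is just an example):

.. code-block:: console

  # echo -n "Authorized use only." | base64
  QXV0aG9yaXplZCB1c2Ugb25seS4=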

GUI Overview
------------
New binary screenshot files added:

- docs/images/screenshots/pbs-installer-grub-menu.png (65 KiB)
- docs/images/screenshots/pbs-installer-location.png (143 KiB)
- docs/images/screenshots/pbs-installer-network.png (153 KiB)
- docs/images/screenshots/pbs-installer-password.png (141 KiB)
- docs/images/screenshots/pbs-installer-progress.png (162 KiB)
- docs/images/screenshots/pbs-installer-select-disk.png (164 KiB)
- docs/images/screenshots/pbs-installer-summary.png (139 KiB)
- docs/images/screenshots/pbs-tui-installer.png (4.6 KiB)
docs/installation-media.rst (new file)
@@ -0,0 +1,157 @@
.. _installation_medium:

Installation Medium
-------------------

Proxmox Backup Server can be installed via
:ref:`different methods <install_pbs>`. The recommended method is the
usage of an installation medium, to simply boot the interactive
installer.

Prepare Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Download the installer ISO image from |DOWNLOADS|.

The Proxmox Backup Server installation medium is a hybrid ISO image.
It works in two ways:

- An ISO image file ready to burn to a DVD.

- A raw sector (IMG) image file ready to copy to a USB flash drive (USB stick).

Using a USB flash drive to install Proxmox Backup Server is the
recommended way since it is the faster and more frequently available
option these days.

Prepare a USB Flash Drive as Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The flash drive needs to have at least 2 GB of storage space.

.. note::

   Do not use *UNetbootin*. It does not work with the Proxmox Backup
   Server installation image.

.. important::

   Existing data on the USB flash drive will be overwritten.
   Therefore, make sure that it does not contain any data you still
   need, and unmount it again before proceeding.

Instructions for GNU/Linux
~~~~~~~~~~~~~~~~~~~~~~~~~~

On Unix-like operating systems use the ``dd`` command to copy the ISO
image to the USB flash drive. First find the correct device name of the
USB flash drive (see below). Then run the ``dd`` command. Depending on
your environment, you will need to have root privileges to execute
``dd``.

.. code-block:: console

  # dd bs=1M conv=fdatasync if=./proxmox-backup-server_*.iso of=/dev/XYZ

.. note::

   Be sure to replace ``/dev/XYZ`` with the correct device name and adapt
   the input filename (*if*) path.

.. caution::

   Be very careful, and do not overwrite the wrong disk!

Find the Correct USB Device Name
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

There are two ways to find out the name of the USB flash drive. The
first one is to compare the last lines of the ``dmesg`` command output
before and after plugging in the flash drive. The second way is to
compare the output of the ``lsblk`` command. Open a terminal and run:

.. code-block:: console

  # lsblk

Then plug in your USB flash drive and run the command again:

.. code-block:: console

  # lsblk

A new device will appear. This is the one you want to use. To be on the
extra safe side, check that the reported size matches your USB flash drive.

Instructions for macOS
~~~~~~~~~~~~~~~~~~~~~~

Open the terminal (query *Terminal* in Spotlight).

Convert the ``.iso`` file to ``.dmg`` format using the convert option of
``hdiutil``, for example:

.. code-block:: console

  # hdiutil convert proxmox-backup-server_*.iso -format UDRW -o proxmox-backup-server_*.dmg

.. note::

   macOS tends to automatically add ``.dmg`` to the output file name.

To get the current list of devices run the command:

.. code-block:: console

  # diskutil list

Now insert the USB flash drive and run this command again to determine
which device node has been assigned to it (e.g., ``/dev/diskX``).

.. code-block:: console

  # diskutil list
  # diskutil unmountDisk /dev/diskX

.. note::

   Replace *X* with the disk number from the last command.

.. code-block:: console

  # sudo dd if=proxmox-backup-server_*.dmg bs=1M of=/dev/rdiskX

.. note::

   *rdiskX*, instead of *diskX*, in the last command is intended. It
   will increase the write speed.

Instructions for Windows
~~~~~~~~~~~~~~~~~~~~~~~~

Using Etcher
^^^^^^^^^^^^

Etcher works out of the box. Download Etcher from https://etcher.io. It
will guide you through the process of selecting the ISO and your USB
flash drive.

Using Rufus
^^^^^^^^^^^

Rufus is a more lightweight alternative, but you need to use the **DD
mode** to make it work. Download Rufus from https://rufus.ie/. Either
install it or use the portable version. Select the destination drive
and the downloaded Proxmox ISO file.

.. important::

   Once you click *Start*, you have to click *No* on the dialog asking to
   download a different version of Grub. In the next dialog select **DD mode**.

Use the Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Insert the created USB flash drive (or DVD) into your server. Continue
by reading the :ref:`installer <using_the_installer>` chapter, which
also describes possible boot issues.
@@ -7,7 +7,9 @@ Debian_ from the provided package repository.

.. include:: system-requirements.rst

.. include:: package-repositories.rst
.. include:: installation-media.rst

.. _install_pbs:

Server Installation
-------------------
@@ -18,44 +20,37 @@ for various management tasks such as disk management.
.. note:: You always need a backup server. It is not possible to use
   Proxmox Backup without the server part.

The disk image (ISO file) provided by Proxmox includes a complete Debian system
as well as all necessary packages for the Proxmox Backup Server.
Using our provided disk image (ISO file) is the recommended
installation method, as it includes a convenient installer, a complete
Debian system as well as all necessary packages for the Proxmox Backup
Server.

The installer will guide you through the setup process and allow
you to partition the local disk(s), apply basic system configuration
(for example timezone, language, network), and install all required packages.
The provided ISO will get you started in just a few minutes, and is the
recommended method for new and existing users.
Once you have created an :ref:`installation_medium`, the booted
:ref:`installer <using_the_installer>` will guide you through the
setup process. It will help you to partition your disks, apply basic
settings such as the language, time zone and network configuration,
and finally install all required packages within minutes.

Alternatively, Proxmox Backup Server can be installed on top of an
existing Debian system.
As an alternative to the interactive installer, advanced users may
wish to install Proxmox Backup Server
:ref:`unattended <install_pbs_unattended>`.

Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
With sufficient Debian knowledge, you can also install Proxmox Backup
Server :ref:`on top of Debian <install_pbs_on_debian>` yourself.

Download the ISO from |DOWNLOADS|.
It includes the following:
While not recommended, Proxmox Backup Server could also be installed
:ref:`on Proxmox VE <install_pbs_on_pve>`.

* The Proxmox Backup Server installer, which partitions the local
  disk(s) with ext4, xfs or ZFS, and installs the operating system
.. include:: using-the-installer.rst

* Complete operating system (Debian Linux, 64-bit)

* Proxmox Linux kernel with ZFS support

* Complete tool-set to administer backups and all necessary resources

* Web based management interface

.. note:: During the installation process, the complete server
   is used by default and all existing data is removed.
.. _install_pbs_unattended:

Install `Proxmox Backup`_ Server Unattended
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to install {pve} automatically in an unattended manner. This
enables you to fully automate the setup process on bare-metal. Once the
installation is complete and the host has booted up, automation tools like
Ansible can be used to further configure the installation.
It is possible to install Proxmox Backup Server automatically in an
unattended manner. This enables you to fully automate the setup process on
bare-metal. Once the installation is complete and the host has booted up,
automation tools like Ansible can be used to further configure the installation.

The necessary options for the installer must be provided in an answer file.
This file allows the use of filter rules to determine which disks and network
@@ -66,6 +61,7 @@ installation ISO. For more details and information on the unattended
installation see `our wiki
<https://pve.proxmox.com/wiki/Automated_Installation>`_.

.. _install_pbs_on_debian:

Install `Proxmox Backup`_ Server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -103,6 +99,8 @@ support, and a set of common and useful packages.
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``

.. _install_pbs_on_pve:

Install Proxmox Backup Server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -123,6 +121,8 @@ After configuring the
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``

.. _install_pbc:

Client Installation
-------------------

@@ -138,7 +138,7 @@ you need to run:
  # apt update
  # apt install proxmox-backup-client


.. note:: The client-only repository should be usable by most recent Debian and
   Ubuntu derivatives.

.. include:: package-repositories.rst
@@ -264,6 +264,7 @@ systems with more than 256 GiB of total memory, where simply setting

  # update-initramfs -u

.. _zfs_swap:

Swap on ZFS
^^^^^^^^^^^
@@ -108,7 +108,7 @@ Ext.define('PageCalibration', {
    xtype: 'numberfield',
    value: 'a4',
    name: 's_x',
    fieldLabel: 'Meassured Start Offset Sx (mm)',
    fieldLabel: 'Measured Start Offset Sx (mm)',
    allowBlank: false,
    labelWidth: 200,
},
@@ -116,7 +116,7 @@ Ext.define('PageCalibration', {
    xtype: 'numberfield',
    value: 'a4',
    name: 'd_x',
    fieldLabel: 'Meassured Length Dx (mm)',
    fieldLabel: 'Measured Length Dx (mm)',
    allowBlank: false,
    labelWidth: 200,
},
@@ -124,7 +124,7 @@ Ext.define('PageCalibration', {
    xtype: 'numberfield',
    value: 'a4',
    name: 's_y',
    fieldLabel: 'Meassured Start Offset Sy (mm)',
    fieldLabel: 'Measured Start Offset Sy (mm)',
    allowBlank: false,
    labelWidth: 200,
},
@@ -132,7 +132,7 @@ Ext.define('PageCalibration', {
    xtype: 'numberfield',
    value: 'a4',
    name: 'd_y',
    fieldLabel: 'Meassured Length Dy (mm)',
    fieldLabel: 'Measured Length Dy (mm)',
    allowBlank: false,
    labelWidth: 200,
},
|
@ -6,8 +6,34 @@ Maintenance Tasks
|
||||
Pruning
|
||||
-------
|
||||
|
||||
Prune lets you specify which backup snapshots you want to keep.
|
||||
The following retention options are available:
|
||||
Prune lets you specify which backup snapshots you want to keep, removing others.
|
||||
When pruning a snapshot, only the snapshot metadata (manifest, indices, blobs,
|
||||
log and notes) is removed. The chunks containing the actual backup data and
|
||||
previously referenced by the pruned snapshot, have to be removed by a garbage
|
||||
collection run.
|
||||
|
||||
.. Caution:: Take into consideration that sensitive information stored in a
|
||||
given data chunk will outlive pruned snapshots and remain present in the
|
||||
datastore as long as referenced by at least one backup snapshot. Further,
|
||||
*even* if no snapshot references a given chunk, it will remain present until
|
||||
removed by the garbage collection.
|
||||
|
||||
Moreover, file-level backups created using the change detection mode
|
||||
``metadata`` can reference backup chunks containing files which have vanished
|
||||
since the previous backup. These files might still be accessible by reading
|
||||
the chunks raw data (client or server side).
|
||||
|
||||
To remove chunks containing sensitive data, prune any snapshot made while the
|
||||
data was part of the backup input and run a garbage collection. Further, if
|
||||
using file-based backups with change detection mode ``metadata``,
|
||||
additionally prune all snapshots since the sensitive data was no longer part
|
||||
of the backup input and run a garbage collection.
|
||||
|
||||
The no longer referenced chunks will then be marked for deletion on the next
|
||||
garbage collection run and removed by a subsequent run after the grace
|
||||
period.
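As a concrete sketch (group name hypothetical), removing all but the most
recent snapshot of a group and then cleaning up could look like:

.. code-block:: console

  # proxmox-backup-client prune host/myclient --keep-last 1
  # proxmox-backup-client garbage-collect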

The following retention options are available for pruning:

``keep-last <N>``
  Keep the last ``<N>`` backup snapshots.

@@ -171,6 +197,8 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up
periodically. For most setups a weekly schedule provides a good interval to
start.

.. _gc_background:

GC Background
^^^^^^^^^^^^^

@@ -196,17 +224,31 @@ datastore or interfering with other backups.
The garbage collection (GC) process is performed per datastore and is split
into two phases:

- Phase one: Mark
  All index files are read, and the access time of the referred chunk files is
  updated.
- Phase one (Mark):

- Phase two: Sweep
  The task iterates over all chunks, checks their file access time, and if it
  is older than the cutoff time (i.e., the time when GC started, plus some
  headroom for safety and Linux file system behavior), the task knows that the
  chunk was neither referred to in any backup index nor part of any currently
  running backup that has no index to scan for. As such, the chunk can be
  safely deleted.
  All index files are read, and the access time (``atime``) of the referenced
  chunk files is updated.

- Phase two (Sweep):

  The task iterates over all chunks and checks their file access time against a
  cutoff time. The cutoff time is given by either the oldest backup writer
  instance, if present, or 24 hours and 5 minutes before the start of the
  garbage collection.
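  For example (illustrative times): if a GC run starts at 12:05 on January 2
  and no older backup writer is active, the cutoff is 12:00 on January 1; any
  chunk whose ``atime`` is older than that cutoff is treated as unused in this
  phase.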

  Garbage collection considers chunk files with access time older than the
  cutoff time to be neither referenced by any backup snapshot's index, nor part
  of any currently running backup job. Therefore, these chunks can safely be
  deleted.

  Chunks within the grace period will not be deleted and are logged at the end
  of the garbage collection task as *Pending removals*.

.. note:: The grace period for backup chunk removal is not arbitrary, but stems
   from the fact that filesystems are typically mounted with the ``relatime``
   option by default. This results in better performance by only updating the
   ``atime`` property if a file has been modified since the last access or the
   last access has been at least 24 hours ago.

Manually Starting GC
^^^^^^^^^^^^^^^^^^^^
@@ -69,6 +69,9 @@ sync-job`` command. The configuration information for sync jobs is stored at
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually from the GUI or provide it with a schedule (see
:ref:`calendar-event-scheduling`) to run regularly.
Backup snapshots, groups and namespaces which are no longer available on the
**Remote** datastore can be removed from the local datastore as well by setting
the ``remove-vanished`` option for the sync job.

.. code-block:: console

@@ -132,6 +135,12 @@ For mixing include and exclude filter, following rules apply:

.. note:: The ``protected`` flag of remote backup snapshots will not be synced.

Enabling the advanced option 'resync-corrupt' will re-sync all snapshots that have
failed to verify during the last :ref:`maintenance_verification`. Hence, a verification
job needs to be run before a sync job with 'resync-corrupt' can be carried out. Be aware
that a 'resync-corrupt'-job needs to check the manifests of all snapshots in a datastore
and might take much longer than regular sync jobs.
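Following the pattern of the other sync job options in this section (the flag
name is assumed to match the option name), this could be enabled on an existing
job with:

.. code-block:: console

  # proxmox-backup-manager sync-job update ID --resync-corrupt true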

Namespace Support
^^^^^^^^^^^^^^^^^

@@ -224,3 +233,43 @@ the web interface or using the ``proxmox-backup-manager`` command-line tool:

.. code-block:: console

  # proxmox-backup-manager sync-job update ID --rate-in 20MiB

Sync Direction Push
^^^^^^^^^^^^^^^^^^^

Sync jobs can be configured for pull or push direction. Sync jobs in push
direction are not identical in behaviour because of the limited access to the
target datastore via the remote server's API. Most notably, pushed content will
always be owned by the user configured in the remote configuration, being
independent from the local user as configured in the sync job. The latter is
used exclusively for permission and scope checks on the pushing side.
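A hypothetical creation of such a job (the direction flag and all names are
assumptions for illustration, not confirmed syntax) might look like:

.. code-block:: console

  # proxmox-backup-manager sync-job create push-to-remote --store local-store \
      --remote my-remote --remote-store target-store --sync-direction push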

.. note:: It is strongly advised to create a dedicated remote configuration for
   each individual sync job in push direction, using a dedicated user on the
   remote. Otherwise, sync jobs pushing to the same target might remove each
   other's snapshots and/or groups if the remove-vanished flag is set, or skip
   snapshots if the backup time is not incremental.
   This is because the backup groups on the target are owned by the user
   given in the remote configuration.

The following permissions are required for a sync job in push direction:

#. ``Remote.Audit`` on ``/remote/{remote}`` and ``Remote.DatastoreBackup`` on
   ``/remote/{remote}/{remote-store}/{remote-ns}`` path or subnamespace.
#. At least ``Datastore.Read`` and ``Datastore.Audit`` on the local source
   datastore namespace (``/datastore/{store}/{ns}``) or ``Datastore.Backup`` if
   owner of the sync job.
#. ``Remote.DatastorePrune`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
   path to remove vanished snapshots and groups. Make sure to use a dedicated
   remote for each sync job in push direction as noted above.
#. ``Remote.DatastoreModify`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
   path to remove vanished namespaces. A remote user with limited access should
   be used on the remote backup server instance. Consider the implications as
   noted below.

.. note:: ``Remote.DatastoreModify`` allows removing whole namespaces on the
   remote target datastore, independent of ownership. Make sure the user as
   configured in remote.cfg has limited permissions on the remote side.

.. note:: Sync jobs in push direction require namespace support on the remote
   Proxmox Backup Server instance (minimum version 2.2).
@@ -83,8 +83,115 @@ allows you to send push notifications to various devices and
applications. It provides a simple API and web interface, making it easy to
integrate with different platforms and services.

.. NOTE:: Gotify targets will respect the HTTP proxy settings from
   Configuration → Other → HTTP proxy

See :ref:`notifications.cfg` for all configuration options.

.. _notification_targets_webhook:

Webhook
^^^^^^^

Webhook notification targets perform HTTP requests to a configurable URL.

The following configuration options are available:

* ``url``: The URL to which to perform the HTTP requests.
  Supports templating to inject message contents, metadata and secrets.
* ``method``: HTTP Method to use (POST/PUT/GET)
* ``header``: Array of HTTP headers that should be set for the request.
  Supports templating to inject message contents, metadata and secrets.
* ``body``: HTTP body that should be sent.
  Supports templating to inject message contents, metadata and secrets.
* ``secret``: Array of secret key-value pairs. These will be stored in
  a protected configuration file only readable by root. Secrets can be
  accessed in body/header/URL templates via the ``secrets`` namespace.
* ``comment``: Comment for this target.

For configuration options that support templating, the
`Handlebars <https://handlebarsjs.com>`_ syntax can be used to
access the following properties:

* ``{{ title }}``: The rendered notification title
* ``{{ message }}``: The rendered notification body
* ``{{ severity }}``: The severity of the notification (``info``, ``notice``,
  ``warning``, ``error``, ``unknown``)
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in seconds).
* ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the
  notification. For instance, ``fields.type`` contains the notification
  type - for all available fields refer to :ref:`notification_events`.
* ``{{ secrets.<name> }}``: Sub-namespace for secrets. For instance, a secret
  named ``token`` is accessible via ``secrets.token``.

For convenience, the following helpers are available:

* ``{{ url-encode <value/property> }}``: URL-encode a property/literal.
* ``{{ escape <value/property> }}``: Escape any control characters that cannot
  be safely represented as a JSON string.
* ``{{ json <value/property> }}``: Render a value as JSON. This can be useful
  to pass a whole sub-namespace (e.g. ``fields``) as a part of a JSON payload
  (e.g. ``{{ json fields }}``).


.. NOTE:: Webhook targets will respect the HTTP proxy settings from
   Configuration → Other → HTTP proxy

Example - ntfy.sh
"""""""""""""""""

* Method: ``POST``
* URL: ``https://ntfy.sh/{{ secrets.channel }}``
* Headers:

  * ``Markdown``: ``Yes``
* Body::

    ```
    {{ message }}
    ```

* Secrets:

  * ``channel``: ``<your ntfy.sh channel>``

Example - Discord
"""""""""""""""""

* Method: ``POST``
* URL: ``https://discord.com/api/webhooks/{{ secrets.token }}``
* Headers:

  * ``Content-Type``: ``application/json``

* Body::

    {
      "content": "``` {{ escape message }}```"
    }

* Secrets:

  * ``token``: ``<token>``

Example - Slack
"""""""""""""""

* Method: ``POST``
* URL: ``https://hooks.slack.com/services/{{ secrets.token }}``
* Headers:

  * ``Content-Type``: ``application/json``

* Body::

    {
      "text": "``` {{escape message}}```",
      "type": "mrkdwn"
    }

* Secrets:

  * ``token``: ``<token>``

.. _notification_matchers:

Notification Matchers
@@ -149,7 +149,7 @@ Currently there's only a client-repository for APT based systems.
.. _package_repositories_client_only_apt:

APT-based Proxmox Backup Client Repository
++++++++++++++++++++++++++++++++++++++++++
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For modern Linux distributions using `apt` as package manager, like all Debian
and Ubuntu derivatives do, you may be able to use the APT-based repository.
@@ -126,7 +126,8 @@ Ext.onReady(function() {
    if (data.mark !== 'keep') {
        return `<div style="text-decoration: line-through;">${text}</div>`;
    }
    if (me.useColors) {
    let pruneList = this.up('prunesimulatorPruneList');
    if (pruneList.useColors) {
        let bgColor = COLORS[data.keepName];
        let textColor = TEXT_COLORS[data.keepName];
        return `<div style="background-color: ${bgColor};color: ${textColor};">${text}</div>`;
@@ -353,12 +354,17 @@ Ext.onReady(function() {
    specValues.forEach(function(value) {
        if (value.includes('..')) {
            let [start, end] = value.split('..');
            let step = 1;
            if (end.includes('/')) {
                [end, step] = end.split('/');
                step = assertValid(step);
            }
            start = assertValid(start);
            end = assertValid(end);
            if (start > end) {
                throw "interval start is bigger then interval end '" + start + " > " + end + "'";
            }
            for (let i = start; i <= end; i++) {
            for (let i = start; i <= end; i += step) {
                matches[i] = 1;
            }
        } else if (value.includes('/')) {
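(For illustration, assuming ``assertValid`` simply parses and validates the
numeric parts: with this step support, a calendar-event range spec such as
``0..22/2`` now matches 0, 2, 4, ..., 22 rather than every value in the range.)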
@@ -165,6 +165,74 @@ following command creates a new datastore called ``store1`` on
  # proxmox-backup-manager datastore create store1 /backup/disk1/store1


Removable Datastores
^^^^^^^^^^^^^^^^^^^^
Removable datastores have a ``backing-device`` associated with them and can be
mounted and unmounted. Other than that, they behave the same way a normal
datastore would.

They can be created on already correctly formatted partitions, which should be
either ``ext4`` or ``xfs`` as with normal datastores, but most modern file
systems supported by the Proxmox Linux kernel should work.

.. note:: FAT-based file systems do not support the POSIX file ownership
   concept and have relatively low limits on the number of files per directory.
   Therefore, creating a datastore is not supported on FAT file systems.
   Because some external drives are preformatted with such a FAT-based file
   system, you may need to reformat the drive before you can use it as a
   backing-device for a removable datastore.

It is also possible to create them on completely unused disks through
"Administration" > "Disks / Storage" > "Directory"; using this method, the disk
will be partitioned and formatted automatically for the datastore.

Devices with only one datastore on them will be mounted automatically. Unmounting has
to be done through the UI by clicking "Unmount" on the summary page or using the CLI.
If unmounting fails, the reason is logged in the unmount task log, and the
datastore will stay in maintenance mode ``unmounting``, which prevents any IO
operations. In such cases, the maintenance mode has to be reset manually using:

.. code-block:: console

  # proxmox-backup-manager datastore update --maintenance-mode offline

to prevent any IO, or to clear it use:

.. code-block:: console

  # proxmox-backup-manager datastore update --delete maintenance-mode


A single device can house multiple datastores; the only limitation is that they
are not allowed to be nested.

Removable datastores are created on the device with the given relative path that is specified
on creation. In order to use a datastore on multiple PBS instances, it has to be created on one,
and added with ``Reuse existing datastore`` checked on the others. The path you set on creation
is how multiple datastores on a single device are identified. So when adding on a new PBS instance,
it has to match what was set on creation.
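A rough sketch of creating such a datastore on the command line (datastore
name, on-device path and device identifier are hypothetical; the exact
``--backing-device`` argument format should be checked against the current
``proxmox-backup-manager`` documentation):

.. code-block:: console

  # proxmox-backup-manager datastore create store-usb store-usb \
      --backing-device "f1e2d3c4-aabb-ccdd-eeff-001122334455"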

.. code-block:: console

  # proxmox-backup-manager datastore unmount store1

both will wait for any running tasks to finish and unmount the device.

All removable datastores are mounted under /mnt/datastore/<name>, and the specified path
refers to the path on the device.

All datastores present on a device can be listed using ``proxmox-backup-debug``.

.. code-block:: console

  # proxmox-backup-debug inspect device /dev/...


Verify, Prune and Garbage Collection jobs are skipped if the removable
datastore is not mounted when they are scheduled. Sync jobs start, but fail
with an error saying the datastore was not mounted. The reason is that syncs
not happening as scheduled should at least be noticeable.

Managing Datastores
^^^^^^^^^^^^^^^^^^^
@@ -30,6 +30,8 @@ please refer to the standard Debian documentation.

.. include:: certificate-management.rst

.. include:: external-metric-server.rst

.. include:: services.rst

.. include:: command-line-tools.rst
@@ -6,6 +6,8 @@ production. To further decrease the impact of a failed host, you can set up
periodic, efficient, incremental :ref:`datastore synchronization <syncjobs>`
from other Proxmox Backup Server instances.

.. _minimum_system_requirements:

Minimum Server Requirements, for Evaluation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -61,6 +61,7 @@ In general, LTO tapes offer the following advantages:
Note that `Proxmox Backup Server` already stores compressed data, so using the
tape compression feature has no advantage.

.. _tape-supported-hardware:

Supported Hardware
------------------
@@ -969,6 +970,8 @@ You can restore from a tape even without an existing catalog, but only the
whole media set. If you do this, the catalog will be automatically created.


.. _tape_key_management:

Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -1180,3 +1183,159 @@ In combination with fitting prune settings and tape backup schedules, this
achieves long-term storage of some backups, while keeping the recent
backups on smaller media sets that expire roughly every 4 weeks (that is, three
plus the current week).


Disaster Recovery
-----------------

.. _Command-line Tools: command-line-tools.html

In case of major disasters, important data, or even whole servers might be
destroyed or at least damaged up to the point where everything - sometimes
including the backup server - has to be restored from a backup. For such cases,
the following step-by-step guide will help you to set up the Proxmox Backup
Server and restore everything from tape backups.

The following guide will explain the necessary steps using both the web GUI and
the command line tools. For an overview of the command line tools, see
`Command-line Tools`_.


Setting Up a Datastore
~~~~~~~~~~~~~~~~~~~~~~

.. _proxmox-backup-manager: proxmox-backup-manager/man1.html

.. _Installation: installation.html

After you set up a new Proxmox Backup Server, as outlined in the `Installation`_
chapter, first set up a datastore so a tape can be restored to it:

#. Go to **Administration -> Storage / Disks** and make sure that the disk that
   will be used as a datastore shows up.

#. Under the **Directory** or **ZFS** tabs, you can either choose to create a
   directory or create a ZFS ``zpool``, respectively. Here you can also directly
   add the newly created directory or ZFS ``zpool`` as a datastore.

Alternatively, the `proxmox-backup-manager`_ can be used to perform the same
tasks. For more information, check the :ref:`datastore_intro` documentation.


Setting Up the Tape Drive
~~~~~~~~~~~~~~~~~~~~~~~~~

#. Make sure you have a properly working tape drive and/or changer matching the
   medium you want to restore from.

#. Connect the tape changer(s) and the tape drive(s) to the backup server. These
   should be detected automatically by Linux. You can get a list of available
   drives using:

   .. code-block:: console

     # proxmox-tape drive scan
     ┌────────────────────────────────┬────────┬─────────────┬────────┐
     │ path │ vendor │ model │ serial │
     ╞════════════════════════════════╪════════╪═════════════╪════════╡
     │ /dev/tape/by-id/scsi-12345-sg │ IBM │ ULT3580-TD4 │ 12345 │
     └────────────────────────────────┴────────┴─────────────┴────────┘

   You can get a list of available changers with:

   .. code-block:: console

     # proxmox-tape changer scan
     ┌─────────────────────────────┬─────────┬──────────────┬────────┐
     │ path │ vendor │ model │ serial │
     ╞═════════════════════════════╪═════════╪══════════════╪════════╡
     │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
     └─────────────────────────────┴─────────┴──────────────┴────────┘

   For more information, please read the chapters
   on :ref:`tape_changer_config` and :ref:`tape_drive_config`.

#. If you have a tape changer, go to the web interface of the Proxmox Backup
   Server, go to **Tape Backup -> Changers** and add it. For examples using the
   command line, read the chapter on :ref:`tape_changer_config`. If the changer
   has been detected correctly by Linux, the changer should show up in the list.

#. In the web interface, go to **Tape Backup -> Drives** and add the tape drive
   that will be used to read the tapes. For examples using the command line,
   read the chapter on :ref:`tape_drive_config`. If the tape drive has been
   detected correctly by Linux, the drive should show up in the list. If the
   drive also has a tape changer, make sure to select the changer as well and
   assign it the correct drive number.


Restoring Data From the Tape
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. _proxmox-tape: proxmox-tape/man1.html

.. _proxmox-backup-client: proxmox-backup-client/man1.html

.. _Restore: https://pve.proxmox.com/pve-docs/chapter-vzdump.html#vzdump_restore

The following guide will explain the steps necessary to restore data from a
tape, which can be done over either the web GUI or the command line. For details
on the command line, read the documentation on the `proxmox-tape`_ tool.

To restore data from tapes, do the following:

#. Insert the first tape (as displayed on the label) into the tape drive or, if
   a tape changer is available, use the tape changer to insert the tape into the
   right drive. The web GUI can also be used to load or transfer tapes between
   tape drives by selecting the changer.

#. If the backup has been encrypted, the encryption keys need to be restored as
   well. In the **Encryption Keys** tab, press **Restore Key**. For more
   details or examples that use the command line, read the
   :ref:`tape_key_management` chapter.

#. The procedure for restoring data is slightly different depending on whether
   you are using a standalone tape drive or a changer:

   * For changers, the procedure is simple:

     #. Insert all tapes from the media set you want to restore from.

     #. Click on the changer in the web GUI, click **Inventory**, make sure
        **Restore Catalog** is selected and press OK.

   * For standalone drives, the procedure would be:

     #. Insert the first tape of the media set.

     #. Click **Catalog**.

     #. Eject the tape, then repeat the steps for the remaining tapes of the
        media set.

#. Go back to **Tape Backup**. In the **Content** tab, press **Restore** and
   select the desired media set. Choose the snapshot you want to restore, press
   **Next**, select the drive and target datastore and press **Restore**.

#. By going to the datastore where the data has been restored, under the
   **Content** tab you should be able to see the restored snapshots. In order to
   access the backups from another machine, you will need to configure the
   access to the backup server. Go to **Configuration -> Access Control** and
   either create a new user, or a new API token (API tokens allow easy
   revocation if the token is compromised). Under **Permissions**, add the
   desired permissions, e.g. **DatastoreBackup**.

#. You can now perform virtual machine, container or file restores. You now have
   the following options:

   * If you want to restore files on Linux distributions that are not based on
     Proxmox products or you prefer using a command line tool, you can use the
     `proxmox-backup-client`_, as explained in the
     :ref:`client_restoring_data` chapter. Use the newly created API token to
     be able to access the data. You can then restore individual files or
     mount an archive to your system.

   * If you want to restore virtual machines or containers on a Proxmox VE
     server, add the datastore of the backup server as storage and go to
     **Backups**. Here you can restore VMs and containers, including their
     configuration. For more information on restoring backups in Proxmox VE,
     visit the `Restore`_ chapter of the Proxmox VE documentation.
@@ -56,8 +56,9 @@ The chunks of a datastore are found in

  <datastore-root>/.chunks/

This chunk directory is further subdivided by the first four bytes of the
chunk's checksum, so a chunk with the checksum
This chunk directory is further subdivided into directories grouping chunks by
their checksum's 2-byte prefix (given as 4 hexadecimal digits), so a chunk with
the checksum

  a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
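Illustratively (the path layout is inferred from the prefix scheme just
described), that chunk would then presumably be found under:

.. code-block:: console

  <datastore-root>/.chunks/a342/a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b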
|
||||
|
||||
@ -133,6 +134,141 @@ This is done to speed up the client part of the backup, since it only needs to
|
||||
encrypt chunks that are actually getting uploaded. Chunks that exist already in
|
||||
the previous backup, do not need to be encrypted and uploaded.
|
||||
|
||||

Change Detection Mode for File-Based Backups
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The change detection mode controls how files that did not change in-between
subsequent backup runs are detected and handled, as well as the archive file
format used to encode the directory entries.

There are 3 modes available: the current default ``legacy`` mode, as well as
the ``data`` and ``metadata`` modes. While the ``legacy`` mode encodes all
contents in a single ``pxar`` archive, the latter two modes split data and
metadata into ``ppxar`` and ``mpxar`` archives. This is done to allow for fast
comparison of metadata with the previous snapshot, which the ``metadata`` mode
uses to detect reusable files. The ``data`` mode refrains from reusing
unchanged files by rechunking each file unconditionally. This mode therefore
assures that no file changes are missed, even if the metadata is unchanged.

.. NOTE:: The ``pxar`` and ``mpxar``/``ppxar`` file formats are different and
   cannot be deduplicated as efficiently if a datastore stores archive
   snapshots of both types.

As the change detection modes are client side changes, they are backwards
compatible with older versions of Proxmox Backup Server. However, exploring
the backup contents of the new archive format via the web interface requires a
Proxmox Backup Server of version 3.2.5 or higher. Upgrading to the latest
version is recommended for full feature compatibility.
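
As a hedged usage sketch (the repository and archive specification are
placeholders), the mode can be selected per backup run on the client side:

.. code-block:: console

    # proxmox-backup-client backup root.pxar:/ \
        --repository backup@pbs@pbs.example.com:store1 \
        --change-detection-mode metadata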

.. _change-detection-mode-legacy:

Legacy Mode
+++++++++++

Backup snapshots of filesystems are created by recursively scanning the
directory entries. All entries to be included in the snapshot are read and
serialized by encoding them using the ``pxar``
:ref:`archive format <pxar-format>`. The resulting stream is chunked into
:ref:`dynamically sized chunks <dynamically-sized-chunks>` and uploaded to the
Proxmox Backup Server, deduplicating chunks based on their content digest for
space efficient storage.
File contents are read and chunked unconditionally; no check is performed to
detect unchanged files.

.. _change-detection-mode-data:

Data Mode
+++++++++

Like in ``legacy`` mode, file contents are read and chunked unconditionally;
no check is performed to detect unchanged files.

However, in contrast to ``legacy`` mode, which stores entry metadata and data
in a single self-contained ``pxar`` archive, the ``data`` mode encodes
metadata and file contents into two separate streams. The resulting backup
snapshots therefore contain split archives: an archive in ``mpxar``
:ref:`format <pxar-meta-format>` containing the entries' metadata, and an
archive in ``ppxar`` :ref:`format <ppxar-format>` containing the actual file
contents, separated by payload headers for consistency checks. The metadata
archive stores a reference offset to the corresponding payload archive entry,
so the file contents can be accessed. Both of these archives are chunked and
uploaded by the Proxmox backup client, resulting in separate indices and
independent chunks.
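
On the server, such a snapshot then contains two dynamic indices instead of a
single one. Listed with the client, this could look roughly as follows (the
output is abbreviated and illustrative; names depend on the archive name used
for the backup):

.. code-block:: console

    # proxmox-backup-client snapshot files host/myhost/2024-01-01T10:00:00Z \
        --repository backup@pbs@pbs.example.com:store1
    root.mpxar.didx
    root.ppxar.didx
    ...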

The ``mpxar`` archive can be used to efficiently fetch the associated metadata
for archive entries, without the overhead of the payload data stored within
the same chunks. This is used, for example, for entry lookups to list the
archive contents, or to navigate the mounted filesystem via the FUSE
implementation. No dedicated catalog is therefore created for archives encoded
using this mode.

Since metadata is not compared to the previous backup snapshot, no files will
be considered reusable by this mode, in contrast to the ``metadata`` mode. The
latter can wrongly reuse a file whose contents changed but whose file size and
mtime did not change (for example, because the mtime was restored after
changing the file's contents).

.. _change-detection-mode-metadata:

Metadata Mode
+++++++++++++

The ``metadata`` mode detects files whose file metadata did not change
in-between subsequent backup runs. The metadata comparison includes file size,
file type, ownership and permission information, as well as ACLs and
attributes, and most importantly the file's mtime; for details see the
:ref:`pxar metadata archive format <pxar-meta-format>`. The files' ctime and
inode number are not stored and not used for comparison, since some tools
(e.g. ``vzdump``) might sync the contents of the filesystem to a temporary
location before actually performing the backup via the Proxmox backup client.
For these cases, ctime and inode number will always change.

This mode will avoid reading and rechunking the file contents whenever
possible, by reusing the file content chunks of unchanged files from the
previous backup snapshot.

To compare the metadata, the previous snapshot's ``mpxar`` metadata archive is
downloaded at the start of the backup run and used as a reference. Further,
the index of the payload archive ``ppxar`` is fetched and used to look up the
file content chunks' digests, which are used to reindex pre-existing chunks
without the need to reread and rechunk the file contents.
During backup, the metadata and payload archives are encoded in the same
manner as for the ``data`` mode, but for the ``metadata`` mode each entry is
additionally looked up in the metadata reference archive for comparison first.
If the file did not change as compared to the reference, the file is
considered unchanged and the Proxmox backup client enters a look-ahead caching
mode. In this mode, the client will keep reading and comparing the following
entries in the filesystem for as long as they are reusable. Further, it keeps
track of the payload archive offset range these file contents are stored in.
The additional look-ahead caching is needed because file boundaries are not
required to be aligned with chunk boundaries; therefore, reused chunks can
contain wasted chunk content (also called padding) if reused unconditionally.

The look-ahead cache will greedily cache all unchanged entries up to the point
where either the cache size limit is reached, a file entry with changed
metadata is encountered, or the range of payload chunks considered for reuse
is not continuous. An example for the latter is a file which disappeared
in-between subsequent backup runs, leaving a hole in the range. At this point,
the caching mode is disabled and the client calculates the wasted padding size
which would be introduced by reusing the payload chunks for all the unchanged
files cached up to this point. If the padding is acceptable (below a preset
limit of 10% of the actually reused chunk content), the files are reused by
encoding them in the metadata archive using updated offset references to the
contents, and by reindexing the pre-existing chunks in the new ``ppxar``
archive. If, however, the padding is not acceptable, exceeding the limit, all
cached entries are reencoded, not reusing any of the pre-existing data. The
metadata as cached will be encoded in the metadata archive, no matter whether
the cached file contents are reused or reencoded.
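
As a worked example of the threshold: if the cached, unchanged files cover
90 MiB of reusable chunk content, but reusing those chunks would drag along
12 MiB of padding, the ratio (about 13%) is above the preset 10% limit and the
cached entries are reencoded; with only 5 MiB of padding (about 6%), the
pre-existing chunks would be reused.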

This combination of look-ahead caching and reuse of pre-existing payload
archive chunks for files with unchanged contents therefore speeds up the
backup process, by avoiding rereading and rechunking file contents whenever
possible.

To reduce padding and increase chunk reusability, during creation of the
archives in ``data`` and ``metadata`` mode the pxar encoder signals
encountered file boundaries as suggested chunk boundaries to the sliding
window chunker. The chunker then decides, based on its internal state, whether
the suggested boundary is accepted or disregarded.
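
(Presumably, a suggested boundary is only taken once the chunker's current
window satisfies its size constraints. Aligning chunk cuts with file ends
means a file reused in a later run tends to start at a chunk boundary, so less
unrelated data has to be dragged along as padding.)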

Caveats and Limitations
-----------------------

@@ -162,8 +298,8 @@ will see that the probability of a collision in that scenario is:

 For context, in a lottery game of guessing 6 numbers out of 45, the chance to
 correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
-chance of a collision is about the same as winning 13 such lottery games *in a
-row*.
+chance of a collision is lower than winning 8 such lottery games *in a row*:
+:math:`(1.2277 * 10^{-7})^{8} = 5.1623 * 10^{-56}`.

 In conclusion, it is extremely unlikely that such a collision would occur by
 accident in a normal datastore.

@@ -183,6 +319,9 @@ read all files again for every backup, otherwise it would not be possible to
 generate a consistent, independent pxar archive where the original chunks can be
 reused. Note that in spite of this, only new or changed chunks will be uploaded.

+In order to avoid these limitations, the Change Detection Mode ``metadata`` was
+introduced.
Verification of Encrypted Chunks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -663,7 +663,7 @@ address must be specified. Most options from :ref:`user_realms_ldap` apply to
 Active Directory as well, most importantly the bind credentials ``bind-dn``
 and ``password``. This is typically required by default for Microsoft Active
 Directory. The ``bind-dn`` can be specified either in AD-specific
-``user@company.net`` syntax or the commen LDAP-DN syntax.
+``user@company.net`` syntax or the common LDAP-DN syntax.

 The authentication domain name must only be specified if anonymous bind is
 requested. If bind credentials are given, the domain name is automatically

docs/using-the-installer.rst (new file, 346 lines)
@@ -0,0 +1,346 @@

.. _using_the_installer:

Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Download the ISO from |DOWNLOADS|.
It includes the following:

* The Proxmox Backup Server installer, which partitions the local
  disk(s) with ext4, xfs or ZFS, and installs the operating system

* Complete operating system (Debian Linux, 64-bit)

* Proxmox Linux kernel with ZFS support

* Complete toolset to administer backups and all necessary resources

* Web-based management interface

.. note:: Any existing data on the selected drives will be overwritten
   during the installation process. The installer does not add boot
   menu entries for other operating systems.

Please insert the :ref:`installation_medium` (for example, USB flash
drive or DVD) and boot from it.

.. note:: You may need to go into your server's firmware settings to
   enable booting from your installation medium (for example, USB) and
   set the desired boot order. When booting an installer prior to
   `Proxmox Backup`_ Server version 3.1, Secure Boot needs to be
   disabled.

.. image:: images/screenshots/pbs-installer-grub-menu.png
   :target: _images/pbs-installer-grub-menu.png
   :align: right
   :alt: Proxmox Backup Server Installer GRUB Menu

After choosing the correct entry (for example, *Boot from USB*), the
Proxmox Backup Server menu will be displayed, and one of the following
options can be selected:

**Install Proxmox Backup Server (Graphical)**

Starts the normal installation.

TIP: It's possible to use the installation wizard with a keyboard only.
Buttons can be clicked by pressing the ``ALT`` key combined with the
underlined character from the respective button. For example, ``ALT + N`` to
press a ``Next`` button.

**Install Proxmox Backup Server (Console)**

Starts the terminal-mode installation wizard. It provides the same overall
installation experience as the graphical installer, but generally has better
compatibility with very old and very new hardware.

**Install Proxmox Backup Server (Terminal UI, Serial Console)**

Starts the terminal-mode installation wizard, additionally setting up the
Linux kernel to use the (first) serial port of the machine for input and
output. This can be used if the machine is completely headless and only has a
serial console available.

.. image:: images/screenshots/pbs-tui-installer.png
   :target: _images/pbs-tui-installer.png
   :align: right
   :alt: Proxmox Backup Server Terminal UI Installer

Both modes use the same code base for the actual installation process, to
benefit from more than a decade of bug fixes and to ensure feature parity.

TIP: The *Console* or *Terminal UI* option can be used in case the graphical
installer does not work correctly, e.g. due to driver issues. See also
:ref:`nomodeset_kernel_param`.

**Advanced Options: Install Proxmox Backup Server (Debug Mode)**

Starts the installation in debug mode. A console will be opened at several
installation steps. This helps to debug the situation if something goes wrong.
To exit a debug console, press ``CTRL-D``. This option can be used to boot a
live system with all basic tools available. You can use it, for example, to
repair a degraded ZFS *rpool* or fix the :ref:`chapter-systembooting` for an
existing Proxmox Backup Server setup.

**Advanced Options: Install Proxmox Backup Server (Terminal UI, Debug Mode)**

Same as the graphical debug mode, but preparing the system to run the
terminal-based installer instead.

**Advanced Options: Install Proxmox Backup Server (Serial Console Debug Mode)**

Same as the terminal-based debug mode, but additionally sets up the Linux
kernel to use the (first) serial port of the machine for input and output.

**Advanced Options: Rescue Boot**

With this option you can boot an existing installation. It searches all
attached hard disks. If it finds an existing installation, it boots directly
into that disk using the Linux kernel from the ISO. This can be useful if
there are problems with the bootloader (GRUB/``systemd-boot``) or the
BIOS/UEFI is unable to read the boot block from the disk.

**Advanced Options: Test Memory (memtest86+)**

Runs *memtest86+*. This is useful to check if the memory is functional and
free of errors. Secure Boot must be turned off in the UEFI firmware setup
utility to run this option.

You normally select *Install Proxmox Backup Server (Graphical)* to start the
installation.

The first step is to read our EULA (End User License Agreement). Following
this, you can select the target hard disk(s) for the installation.

.. caution:: By default, the whole server is used and all existing data is
   removed. Make sure there is no important data on the server before
   proceeding with the installation.

The *Options* button lets you select the target file system, which defaults to
``ext4``. The installer uses LVM if you select ``ext4`` or ``xfs`` as a file
system, and offers additional options to restrict LVM space (see :ref:`below
<advanced_lvm_options>`).

.. image:: images/screenshots/pbs-installer-select-disk.png
   :target: _images/pbs-installer-select-disk.png
   :align: right
   :alt: Proxmox Backup Server Installer - Hard disk selection dialog

Proxmox Backup Server can also be installed on ZFS. As ZFS offers several
software RAID levels, this is an option for systems that don't have a hardware
RAID controller. The target disks must be selected in the *Options* dialog.
More ZFS specific settings can be changed under :ref:`Advanced Options
<advanced_zfs_options>`.

.. warning:: ZFS on top of any hardware RAID is not supported and can result
   in data loss.

.. image:: images/screenshots/pbs-installer-location.png
   :target: _images/pbs-installer-location.png
   :align: right
   :alt: Proxmox Backup Server Installer - Location and timezone configuration

The next page asks for basic configuration options like your location, time
zone, and keyboard layout. The location is used to select a nearby download
server, in order to increase the speed of updates. The installer is usually
able to auto-detect these settings, so you only need to change them in the
rare situations when auto-detection fails, or when you want to use a keyboard
layout not commonly used in your country.

.. image:: images/screenshots/pbs-installer-password.png
   :target: _images/pbs-installer-password.png
   :align: left
   :alt: Proxmox Backup Server Installer - Password and email configuration

Next, the password of the superuser (``root``) and an email address need to be
specified. The password must consist of at least 8 characters. It's highly
recommended to use a stronger password. Some guidelines are:

- Use a minimum password length of at least 12 characters.

- Include lowercase and uppercase alphabetic characters, numbers, and symbols.

- Avoid character repetition, keyboard patterns, common dictionary words,
  letter or number sequences, usernames, relative or pet names, romantic links
  (current or past), and biographical information (for example ID numbers,
  ancestors' names or dates).

The email address is used to send notifications to the system administrator.
For example:

- Information about available package updates.

- Error messages from periodic *cron* jobs.

.. image:: images/screenshots/pbs-installer-network.png
   :target: _images/pbs-installer-network.png
   :align: right
   :alt: Proxmox Backup Server Installer - Network configuration

All those notification mails will be sent to the specified email address.

The last step is the network configuration. Network interfaces that are *UP*
show a filled circle in front of their name in the drop-down menu. Please note
that during installation you can specify either an IPv4 or an IPv6 address,
but not both. To configure a dual stack node, add additional IP addresses
after the installation.

.. image:: images/screenshots/pbs-installer-progress.png
   :target: _images/pbs-installer-progress.png
   :align: left
   :alt: Proxmox Backup Server Installer - Installation progress

The next step shows a summary of the previously selected options. Please
re-check every setting and use the *Previous* button if a setting needs to be
changed.

After clicking *Install*, the installer will begin to format the disks and
copy packages to the target disk(s). Please wait until this step has finished;
then remove the installation medium and restart your system.

.. image:: images/screenshots/pbs-installer-summary.png
   :target: _images/pbs-installer-summary.png
   :align: right
   :alt: Proxmox Backup Server Installer - Installation summary

Copying the packages usually takes several minutes, mostly depending on the
speed of the installation medium and the target disk performance.

When copying and setting up the packages has finished, you can reboot the
server. This will be done automatically after a few seconds by default.

Installation Failure
^^^^^^^^^^^^^^^^^^^^

If the installation fails, check for specific errors on the second TTY
(``CTRL + ALT + F2``) and ensure that the system meets the
:ref:`minimum requirements <minimum_system_requirements>`.

If the installation is still not working, look at the :ref:`how to get help
chapter <get_help>`.

Accessing the Management Interface Post-Installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. image:: images/screenshots/pbs-gui-login-window.png
   :target: _images/pbs-gui-login-window.png
   :align: right
   :alt: Proxmox Backup Server - Management interface login dialog

After a successful installation and reboot of the system, you can use the
Proxmox Backup Server web interface for further configuration.

- Point your browser to the IP address given during the installation and port
  8007, for example: https://pbs.yourdomain.tld:8007

- Log in using the ``root`` username (realm *Linux PAM standard
  authentication*) and the password chosen during installation.

- Upload your subscription key to gain access to the Enterprise repository.
  Otherwise, you will need to set up one of the public, less tested package
  repositories to get updates for security fixes, bug fixes, and new features.

- Check the IP configuration and hostname.

- Check the timezone.

.. _advanced_lvm_options:

Advanced LVM Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If ``ext4`` or ``xfs`` is used as the file system, the installer creates a
Volume Group (VG) called ``pbs``, and additional Logical Volumes (LVs) called
``root`` and ``swap``. The size of these volumes can be controlled with:

- *hdsize*

  Defines the total hard disk size to be used. This way you can reserve free
  space on the hard disk for further partitioning.

- *swapsize*

  Defines the size of the ``swap`` volume. The default is the size of the
  installed memory, with a minimum of 4 GB and a maximum of 8 GB. The
  resulting value cannot be greater than ``hdsize/8``.

  If set to ``0``, no ``swap`` volume will be created.

- *minfree*

  Defines the amount of free space that should be left in the LVM volume
  group ``pbs``. With more than 128 GB storage available, the default is
  16 GB, otherwise ``hdsize/8`` will be used.
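
As a rough worked example of these defaults (not an official sizing rule): on
a machine with 32 GB of memory and *hdsize* set to 64 GB, the ``swap`` volume
would default to 8 GB (the memory size, capped at the 8 GB maximum, which here
coincides with the ``hdsize/8`` bound), and *minfree* would default to
``hdsize/8`` = 8 GB, since less than 128 GB of storage is available.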

.. _advanced_zfs_options:

Advanced ZFS Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If ZFS is used, the installer creates the ZFS pool ``rpool``. No swap space is
created, but you can reserve some unpartitioned space on the install disks for
swap. You can also create a swap zvol after the installation, although this
can lead to problems (see :ref:`ZFS swap notes <zfs_swap>`).

- *ashift*

  Defines the *ashift* value for the created pool. The *ashift* needs to be
  set at least to the sector size of the underlying disks (2 to the power of
  *ashift* is the sector size; for example, ``ashift=12`` for 4096-byte
  sectors), or of any disk which might be put in the pool (for example, the
  replacement of a defective disk).

- *compress*

  Defines whether compression is enabled for ``rpool``.

- *checksum*

  Defines which checksumming algorithm should be used for ``rpool``.

- *copies*

  Defines the *copies* parameter for ``rpool``. Check the ``zfs(8)`` manpage
  for the semantics, and why this does not replace redundancy on the disk
  level.

- *hdsize*

  Defines the total hard disk size to be used. This is useful to save free
  space on the hard disk(s) for further partitioning (for example, to create
  a swap partition). *hdsize* is only honored for bootable disks, that is,
  only the first disk or mirror for RAID0, RAID1 or RAID10, and all disks in
  RAID-Z[123].
ZFS Performance Tips
^^^^^^^^^^^^^^^^^^^^

ZFS works best with a lot of memory. If you intend to use ZFS, make sure to
have enough RAM available for it. A good calculation is 4 GB plus 1 GB of RAM
for each TB of raw disk space.
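
For example, by this rule of thumb, a backup server with 40 TB of raw disk
space would be provisioned with roughly 4 GB + 40 GB = 44 GB of RAM for ZFS.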

ZFS can use a dedicated drive as write cache, called the ZFS Intent Log (ZIL).
Use a fast drive (SSD) for it. It can be added after installation with the
following command:

.. code-block:: console

    # zpool add <pool-name> log </dev/path_to_fast_ssd>

.. _nomodeset_kernel_param:

Adding the ``nomodeset`` Kernel Parameter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Problems may arise on very old or very new hardware due to graphics drivers.
If the installation hangs during boot, you can try adding the ``nomodeset``
parameter. This prevents the Linux kernel from loading any graphics drivers
and forces it to continue using the BIOS/UEFI-provided framebuffer.

On the Proxmox Backup Server bootloader menu, navigate to *Install Proxmox
Backup Server (Console)* and press ``e`` to edit the entry. Using the arrow
keys, navigate to the line starting with ``linux``, move the cursor to the end
of that line and add the parameter ``nomodeset``, separated by a space from
the pre-existing last parameter.
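
The edited line could then look roughly as follows, where the bracketed part
stands for the kernel image path and pre-existing parameters of your boot
entry (these differ per medium and are only illustrative here):

.. code-block:: console

    linux [...] nomodeset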

Then press ``Ctrl-X`` or ``F10`` to boot the configuration.

@@ -2,6 +2,7 @@ include ../defines.mk

UNITS := \
	proxmox-backup-daily-update.timer \
	removable-device-attach@.service

DYNAMIC_UNITS := \
	proxmox-backup-banner.service \

etc/removable-device-attach@.service (new file, 8 lines)
@@ -0,0 +1,8 @@

[Unit]
Description=Try to mount the removable device of a datastore with uuid '%i'.
After=proxmox-backup-proxy.service
Requires=proxmox-backup-proxy.service

[Service]
Type=simple
ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i
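
(As a usage sketch, with a placeholder UUID: such a systemd template unit
would be instantiated with the device's UUID as the instance name, e.g.
``systemctl start 'removable-device-attach@A1B2-C3D4.service'``.)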

@@ -10,7 +10,7 @@ use tokio::net::TcpStream;

 // Simple H2 client to test H2 download speed using h2server.rs

 struct Process {
-    body: h2::RecvStream,
+    body: h2::legacy::RecvStream,
     trailers: bool,
     bytes: usize,
 }

@@ -50,11 +50,11 @@ impl Future for Process {
 }

 fn send_request(
-    mut client: h2::client::SendRequest<bytes::Bytes>,
+    mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
 ) -> impl Future<Output = Result<usize, Error>> {
     println!("sending request");

-    let request = http::Request::builder()
+    let request = hyper::http::Request::builder()
         .uri("http://localhost/")
         .body(())
         .unwrap();

@@ -78,7 +78,7 @@ async fn run() -> Result<(), Error> {
     let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
     conn.set_nodelay(true).unwrap();

-    let (client, h2) = h2::client::Builder::new()
+    let (client, h2) = h2::legacy::client::Builder::new()
         .initial_connection_window_size(1024 * 1024 * 1024)
         .initial_window_size(1024 * 1024 * 1024)
         .max_frame_size(4 * 1024 * 1024)

@@ -10,7 +10,7 @@ use tokio::net::TcpStream;

 // Simple H2 client to test H2 download speed using h2s-server.rs

 struct Process {
-    body: h2::RecvStream,
+    body: h2::legacy::RecvStream,
     trailers: bool,
     bytes: usize,
 }

@@ -50,11 +50,11 @@ impl Future for Process {
 }

 fn send_request(
-    mut client: h2::client::SendRequest<bytes::Bytes>,
+    mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
 ) -> impl Future<Output = Result<usize, Error>> {
     println!("sending request");

-    let request = http::Request::builder()
+    let request = hyper::http::Request::builder()
         .uri("http://localhost/")
         .body(())
         .unwrap();

@@ -94,7 +94,7 @@ async fn run() -> Result<(), Error> {
         .await
         .map_err(|err| format_err!("connect failed - {}", err))?;

-    let (client, h2) = h2::client::Builder::new()
+    let (client, h2) = h2::legacy::client::Builder::new()
         .initial_connection_window_size(1024 * 1024 * 1024)
         .initial_window_size(1024 * 1024 * 1024)
         .max_frame_size(4 * 1024 * 1024)

@@ -63,8 +63,11 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
     let body = Body::from(buffer);

     let response = Response::builder()
-        .status(http::StatusCode::OK)
-        .header(http::header::CONTENT_TYPE, "application/octet-stream")
+        .status(hyper::http::StatusCode::OK)
+        .header(
+            hyper::http::header::CONTENT_TYPE,
+            "application/octet-stream",
+        )
         .body(body)
         .unwrap();
     future::ok::<_, Error>(response)

@@ -39,8 +39,11 @@ async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
     let body = Body::from(buffer);

     let response = Response::builder()
-        .status(http::StatusCode::OK)
-        .header(http::header::CONTENT_TYPE, "application/octet-stream")
+        .status(hyper::http::StatusCode::OK)
+        .header(
+            hyper::http::header::CONTENT_TYPE,
+            "application/octet-stream",
+        )
         .body(body)
         .unwrap();
     future::ok::<_, Error>(response)

@@ -1,9 +1,10 @@
 use std::str::FromStr;

 use anyhow::Error;
 use futures::*;

 extern crate proxmox_backup;

 use pbs_client::ChunkStream;
 use proxmox_human_byte::HumanByte;

 // Test Chunker with real data read from a file.
 //

@@ -21,9 +22,19 @@ fn main() {
 async fn run() -> Result<(), Error> {
     let file = tokio::fs::File::open("random-test.dat").await?;

-    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| bytes.to_vec())
-        .map_err(Error::from);
+    let mut args = std::env::args();
+    args.next();
+
+    let buffer_size = args.next().unwrap_or("8k".to_string());
+    let buffer_size = HumanByte::from_str(&buffer_size)?;
+    println!("Using buffer size {buffer_size}");
+
+    let stream = tokio_util::codec::FramedRead::with_capacity(
+        file,
+        tokio_util::codec::BytesCodec::new(),
+        buffer_size.as_u64() as usize,
+    )
+    .map_err(Error::from);

     //let chunk_stream = FixedChunkStream::new(stream, 4*1024*1024);
     let mut chunk_stream = ChunkStream::new(stream, None, None, None);

@@ -40,7 +51,7 @@ async fn run() -> Result<(), Error> {
         repeat += 1;
         stream_len += chunk.len();

-        println!("Got chunk {}", chunk.len());
+        //println!("Got chunk {}", chunk.len());
     }

     let speed =

@@ -1,24 +0,0 @@

[package]
name = "pbs-api-types"
version = "0.1.0"
authors.workspace = true
edition.workspace = true
description = "general API type helpers for PBS"

[dependencies]
anyhow.workspace = true
const_format.workspace = true
hex.workspace = true
lazy_static.workspace = true
percent-encoding.workspace = true
regex.workspace = true
serde.workspace = true
serde_plain.workspace = true

proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
proxmox-human-byte.workspace = true
proxmox-lang.workspace=true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-serde.workspace = true
proxmox-time.workspace = true
proxmox-uuid = { workspace = true, features = [ "serde" ] }

@@ -1,294 +0,0 @@

use std::str::FromStr;

use const_format::concatcp;
use serde::de::{value, IntoDeserializer};
use serde::{Deserialize, Serialize};

use proxmox_lang::constnamedbitmap;
use proxmox_schema::{
    api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
};

use crate::PROXMOX_SAFE_ID_REGEX_STR;

const_regex! {
    pub ACL_PATH_REGEX = concatcp!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR, ")+", r")$");
}

// define Privilege bitfield

constnamedbitmap! {
    /// Contains a list of privilege name to privilege value mappings.
    ///
    /// The names are used when displaying/persisting privileges anywhere, the values are used to
    /// allow easy matching of privileges as bitflags.
    PRIVILEGES: u64 => {
        /// Sys.Audit allows knowing about the system and its status
        PRIV_SYS_AUDIT("Sys.Audit");
        /// Sys.Modify allows modifying system-level configuration
        PRIV_SYS_MODIFY("Sys.Modify");
        /// Sys.Modify allows to poweroff/reboot/.. the system
        PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");

        /// Datastore.Audit allows knowing about a datastore,
        /// including reading the configuration entry and listing its contents
        PRIV_DATASTORE_AUDIT("Datastore.Audit");
        /// Datastore.Allocate allows creating or deleting datastores
        PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
        /// Datastore.Modify allows modifying a datastore and its contents
        PRIV_DATASTORE_MODIFY("Datastore.Modify");
        /// Datastore.Read allows reading arbitrary backup contents
        PRIV_DATASTORE_READ("Datastore.Read");
        /// Allows verifying a datastore
        PRIV_DATASTORE_VERIFY("Datastore.Verify");

        /// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
        /// but also requires backup ownership
        PRIV_DATASTORE_BACKUP("Datastore.Backup");
        /// Datastore.Prune allows deleting snapshots,
        /// but also requires backup ownership
        PRIV_DATASTORE_PRUNE("Datastore.Prune");

        /// Permissions.Modify allows modifying ACLs
        PRIV_PERMISSIONS_MODIFY("Permissions.Modify");

        /// Remote.Audit allows reading remote.cfg and sync.cfg entries
        PRIV_REMOTE_AUDIT("Remote.Audit");
        /// Remote.Modify allows modifying remote.cfg
        PRIV_REMOTE_MODIFY("Remote.Modify");
        /// Remote.Read allows reading data from a configured `Remote`
        PRIV_REMOTE_READ("Remote.Read");

        /// Sys.Console allows access to the system's console
        PRIV_SYS_CONSOLE("Sys.Console");

        /// Tape.Audit allows reading tape backup configuration and status
        PRIV_TAPE_AUDIT("Tape.Audit");
        /// Tape.Modify allows modifying tape backup configuration
        PRIV_TAPE_MODIFY("Tape.Modify");
        /// Tape.Write allows writing tape media
        PRIV_TAPE_WRITE("Tape.Write");
        /// Tape.Read allows reading tape backup configuration and media contents
        PRIV_TAPE_READ("Tape.Read");

        /// Realm.Allocate allows viewing, creating, modifying and deleting realms
        PRIV_REALM_ALLOCATE("Realm.Allocate");
    }
}

pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
    PRIVILEGES
        .iter()
        .fold(Vec::new(), |mut priv_names, (name, value)| {
            if value & privs != 0 {
                priv_names.push(name);
            }
            priv_names
        })
}

/// Admin always has all privileges. It can do everything except a few actions
/// which are limited to the 'root@pam` superuser
pub const ROLE_ADMIN: u64 = u64::MAX;

/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NO_ACCESS: u64 = 0;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Audit can view configuration and status information, but not modify it.
pub const ROLE_AUDIT: u64 = 0
    | PRIV_SYS_AUDIT
    | PRIV_DATASTORE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Admin can do anything on the datastore.
pub const ROLE_DATASTORE_ADMIN: u64 = 0
    | PRIV_DATASTORE_AUDIT
    | PRIV_DATASTORE_MODIFY
    | PRIV_DATASTORE_READ
    | PRIV_DATASTORE_VERIFY
    | PRIV_DATASTORE_BACKUP
    | PRIV_DATASTORE_PRUNE;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Reader can read/verify datastore content and do restore
pub const ROLE_DATASTORE_READER: u64 = 0
    | PRIV_DATASTORE_AUDIT
    | PRIV_DATASTORE_VERIFY
    | PRIV_DATASTORE_READ;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Backup can do backup and restore, but no prune.
pub const ROLE_DATASTORE_BACKUP: u64 = 0
    | PRIV_DATASTORE_BACKUP;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.PowerUser can do backup, restore, and prune.
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
    | PRIV_DATASTORE_PRUNE
    | PRIV_DATASTORE_BACKUP;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Audit can audit the datastore.
pub const ROLE_DATASTORE_AUDIT: u64 = 0
    | PRIV_DATASTORE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Audit can audit the remote
pub const ROLE_REMOTE_AUDIT: u64 = 0
    | PRIV_REMOTE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Admin can do anything on the remote.
pub const ROLE_REMOTE_ADMIN: u64 = 0
    | PRIV_REMOTE_AUDIT
    | PRIV_REMOTE_MODIFY
    | PRIV_REMOTE_READ;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.SyncOperator can do read and prune on the remote.
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
    | PRIV_REMOTE_AUDIT
    | PRIV_REMOTE_READ;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Audit can audit the tape backup configuration and media content
pub const ROLE_TAPE_AUDIT: u64 = 0
    | PRIV_TAPE_AUDIT;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Admin can do anything on the tape backup
pub const ROLE_TAPE_ADMIN: u64 = 0
    | PRIV_TAPE_AUDIT
    | PRIV_TAPE_MODIFY
    | PRIV_TAPE_READ
    | PRIV_TAPE_WRITE;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Operator can do tape backup and restore (but no configuration changes)
pub const ROLE_TAPE_OPERATOR: u64 = 0
    | PRIV_TAPE_AUDIT
    | PRIV_TAPE_READ
    | PRIV_TAPE_WRITE;

#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Reader can do read and inspect tape content
pub const ROLE_TAPE_READER: u64 = 0
    | PRIV_TAPE_AUDIT
    | PRIV_TAPE_READ;

/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";

#[api(
    type_text: "<role>",
)]
#[repr(u64)]
#[derive(Serialize, Deserialize)]
/// Enum representing roles via their [PRIVILEGES] combination.
///
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
/// single, unique `u64` value that is used in this enum definition.
pub enum Role {
    /// Administrator
    Admin = ROLE_ADMIN,
    /// Auditor
    Audit = ROLE_AUDIT,
    /// Disable Access
    NoAccess = ROLE_NO_ACCESS,
    /// Datastore Administrator
    DatastoreAdmin = ROLE_DATASTORE_ADMIN,
    /// Datastore Reader (inspect datastore content and do restores)
    DatastoreReader = ROLE_DATASTORE_READER,
    /// Datastore Backup (backup and restore owned backups)
    DatastoreBackup = ROLE_DATASTORE_BACKUP,
    /// Datastore PowerUser (backup, restore and prune owned backup)
    DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
    /// Datastore Auditor
    DatastoreAudit = ROLE_DATASTORE_AUDIT,
    /// Remote Auditor
    RemoteAudit = ROLE_REMOTE_AUDIT,
    /// Remote Administrator
    RemoteAdmin = ROLE_REMOTE_ADMIN,
    /// Syncronisation Opertator
    RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
    /// Tape Auditor
    TapeAudit = ROLE_TAPE_AUDIT,
    /// Tape Administrator
    TapeAdmin = ROLE_TAPE_ADMIN,
    /// Tape Operator
    TapeOperator = ROLE_TAPE_OPERATOR,
    /// Tape Reader
    TapeReader = ROLE_TAPE_READER,
}

impl FromStr for Role {
    type Err = value::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::deserialize(s.into_deserializer())
    }
}

pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);

pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
    .format(&ACL_PATH_FORMAT)
    .min_length(1)
    .max_length(128)
    .schema();

pub const ACL_PROPAGATE_SCHEMA: Schema =
    BooleanSchema::new("Allow to propagate (inherit) permissions.")
        .default(true)
        .schema();

pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
    .format(&ApiStringFormat::Enum(&[
        EnumEntry::new("user", "User"),
        EnumEntry::new("group", "Group"),
    ]))
    .schema();

#[api(
    properties: {
        propagate: {
            schema: ACL_PROPAGATE_SCHEMA,
        },
        path: {
            schema: ACL_PATH_SCHEMA,
        },
        ugid_type: {
            schema: ACL_UGID_TYPE_SCHEMA,
        },
        ugid: {
            type: String,
            description: "User or Group ID.",
        },
        roleid: {
            type: Role,
        }
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ACL list entry.
pub struct AclListItem {
    pub path: String,
    pub ugid: String,
    pub ugid_type: String,
    pub propagate: bool,
    pub roleid: String,
}

@@ -1,98 +0,0 @@

use serde::{Deserialize, Serialize};

use proxmox_schema::{api, Updater};

use super::{
    LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
    SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA,
};

#[api(
    properties: {
        "realm": {
            schema: REALM_ID_SCHEMA,
        },
        "comment": {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "verify": {
            optional: true,
            default: false,
        },
        "sync-defaults-options": {
            schema: SYNC_DEFAULTS_STRING_SCHEMA,
            optional: true,
        },
        "sync-attributes": {
            schema: SYNC_ATTRIBUTES_SCHEMA,
            optional: true,
        },
        "user-classes" : {
            optional: true,
            schema: USER_CLASSES_SCHEMA,
        },
        "base-dn" : {
            schema: LDAP_DOMAIN_SCHEMA,
            optional: true,
        },
        "bind-dn" : {
            schema: LDAP_DOMAIN_SCHEMA,
            optional: true,
        }
    },
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// AD realm configuration properties.
pub struct AdRealmConfig {
    #[updater(skip)]
    pub realm: String,
    /// AD server address
    pub server1: String,
    /// Fallback AD server address
    #[serde(skip_serializing_if = "Option::is_none")]
    pub server2: Option<String>,
    /// AD server Port
    #[serde(skip_serializing_if = "Option::is_none")]
    pub port: Option<u16>,
    /// Base domain name. Users are searched under this domain using a `subtree search`.
    /// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be
    /// overridden if the need arises.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub base_dn: Option<String>,
    /// Comment
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Connection security
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<LdapMode>,
    /// Verify server certificate
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify: Option<bool>,
    /// CA certificate to use for the server. The path can point to
    /// either a file, or a directory. If it points to a file,
    /// the PEM-formatted X.509 certificate stored at the path
    /// will be added as a trusted certificate.
    /// If the path points to a directory,
    /// the directory replaces the system's default certificate
    /// store at `/etc/ssl/certs` - Every file in the directory
    /// will be loaded as a trusted certificate.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub capath: Option<String>,
    /// Bind domain to use for looking up users
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bind_dn: Option<String>,
    /// Custom LDAP search filter for user sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter: Option<String>,
    /// Default options for AD sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_defaults_options: Option<String>,
    /// List of LDAP attributes to sync from AD to user config
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_attributes: Option<String>,
    /// User ``objectClass`` classes to sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_classes: Option<String>,
}

@@ -1,95 +0,0 @@

use std::fmt::{self, Display};

use anyhow::Error;
use serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
pub enum CryptMode {
    /// Don't encrypt.
    None,
    /// Encrypt.
    Encrypt,
    /// Only sign.
    SignOnly,
}

#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
#[serde(transparent)]
/// 32-byte fingerprint, usually calculated with SHA256.
pub struct Fingerprint {
    #[serde(with = "bytes_as_fingerprint")]
    bytes: [u8; 32],
}

impl Fingerprint {
    pub fn new(bytes: [u8; 32]) -> Self {
        Self { bytes }
    }
    pub fn bytes(&self) -> &[u8; 32] {
        &self.bytes
    }
    pub fn signature(&self) -> String {
        as_fingerprint(&self.bytes)
    }
}

/// Display as short key ID
impl Display for Fingerprint {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
    }
}

impl std::str::FromStr for Fingerprint {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Error> {
        let mut tmp = s.to_string();
        tmp.retain(|c| c != ':');
        let mut bytes = [0u8; 32];
        hex::decode_to_slice(&tmp, &mut bytes)?;
        Ok(Fingerprint::new(bytes))
    }
}

fn as_fingerprint(bytes: &[u8]) -> String {
    hex::encode(bytes)
        .as_bytes()
        .chunks(2)
        .map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
        .collect::<Vec<&str>>()
        .join(":")
}

pub mod bytes_as_fingerprint {
    use std::mem::MaybeUninit;

    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let s = super::as_fingerprint(bytes);
        serializer.serialize_str(&s)
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
    where
        D: Deserializer<'de>,
    {
        // TODO: more efficiently implement with a Visitor implementing visit_str using split() and
        // hex::decode by-byte
        let mut s = String::deserialize(deserializer)?;
        s.retain(|c| c != ':');
        let mut out = MaybeUninit::<[u8; 32]>::uninit();
        hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
            .map_err(serde::de::Error::custom)?;
        Ok(unsafe { out.assume_init() })
    }
}

@@ -1,30 +0,0 @@

use serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// General status information about a running VM file-restore daemon
pub struct RestoreDaemonStatus {
    /// VM uptime in seconds
    pub uptime: i64,
    /// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is
    /// not set, as then the status call will have reset the timer before returning the value
    pub timeout: i64,
}

#[api]
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// The desired format of the result.
pub enum FileRestoreFormat {
    /// Plain file (only works for single files)
    Plain,
    /// PXAR archive
    Pxar,
    /// ZIP archive
    Zip,
    /// TAR archive
    Tar,
}
@ -1,799 +0,0 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::bail;
|
||||
use const_format::concatcp;
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::*;
|
||||
|
||||
use crate::{
|
||||
Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid,
|
||||
BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA,
|
||||
DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
|
||||
PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
|
||||
};
|
||||
|
||||
const_regex! {
|
||||
|
||||
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
|
||||
pub VERIFICATION_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"):");
|
||||
/// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
|
||||
pub SYNC_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR, r"):(", PROXMOX_SAFE_ID_REGEX_STR, r")(?::(", BACKUP_NS_RE, r"))?:");
|
||||
}
|
||||
|
||||
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(
|
||||
proxmox_time::verify_calendar_event,
|
||||
))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const GC_SCHEDULE_SCHEMA: Schema =
|
||||
StringSchema::new("Run garbage collection job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(
|
||||
proxmox_time::verify_calendar_event,
|
||||
))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(
|
||||
proxmox_time::verify_calendar_event,
|
||||
))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema =
|
||||
StringSchema::new("Run verify job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(
|
||||
proxmox_time::verify_calendar_event,
|
||||
))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||
"Delete vanished backups. This remove the local copy if the remote backup was deleted.",
|
||||
)
|
||||
.default(false)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"next-run": {
|
||||
description: "Estimated time of the next run (UNIX epoch).",
|
||||
optional: true,
|
||||
type: Integer,
|
||||
},
|
||||
"last-run-state": {
|
||||
description: "Result of the last run.",
|
||||
optional: true,
|
||||
type: String,
|
||||
},
|
||||
"last-run-upid": {
|
||||
description: "Task UPID of the last run.",
|
||||
optional: true,
|
||||
type: String,
|
||||
},
|
||||
"last-run-endtime": {
|
||||
description: "Endtime of the last run.",
|
||||
optional: true,
|
||||
type: Integer,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Default, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Job Scheduling Status
|
||||
pub struct JobScheduleStatus {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub next_run: Option<i64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub last_run_state: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub last_run_upid: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub last_run_endtime: Option<i64>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// When do we send notifications
|
||||
pub enum Notify {
|
||||
/// Never send notification
|
||||
Never,
|
||||
/// Send notifications for failed and successful jobs
|
||||
Always,
|
||||
/// Send notifications for failed jobs only
|
||||
Error,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
gc: {
|
||||
type: Notify,
|
||||
optional: true,
|
||||
},
|
||||
verify: {
|
||||
type: Notify,
|
||||
optional: true,
|
||||
},
|
||||
sync: {
|
||||
type: Notify,
|
||||
optional: true,
|
||||
},
|
||||
prune: {
|
||||
type: Notify,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
/// Datastore notify settings
|
||||
pub struct DatastoreNotify {
|
||||
/// Garbage collection settings
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub gc: Option<Notify>,
|
||||
/// Verify job setting
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub verify: Option<Notify>,
|
||||
/// Sync job setting
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub sync: Option<Notify>,
|
||||
/// Prune job setting
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub prune: Option<Notify>,
|
||||
}
|
||||
|
||||
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
|
||||
"Datastore notification setting, enum can be one of 'always', 'never', or 'error'.",
|
||||
)
|
||||
.format(&ApiStringFormat::PropertyString(
|
||||
&DatastoreNotify::API_SCHEMA,
|
||||
))
|
||||
.schema();
|
||||
|
||||
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||
"Do not verify backups that are already verified if their verification is not outdated.",
|
||||
)
|
||||
.default(true)
|
||||
.schema();
|
||||
|
||||
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
|
||||
IntegerSchema::new("Days after that a verification becomes outdated. (0 is deprecated)'")
|
||||
.minimum(0)
|
||||
.schema();

#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        "ignore-verified": {
            optional: true,
            schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
        },
        "outdated-after": {
            optional: true,
            schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        schedule: {
            optional: true,
            schema: VERIFICATION_SCHEDULE_SCHEMA,
        },
        ns: {
            optional: true,
            schema: BACKUP_NAMESPACE_SCHEMA,
        },
        "max-depth": {
            optional: true,
            schema: crate::NS_MAX_DEPTH_SCHEMA,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Verification Job
pub struct VerificationJobConfig {
    /// unique ID to address this job
    #[updater(skip)]
    pub id: String,
    /// the datastore ID this verification job affects
    pub store: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// if not set to false, check the age of the last snapshot verification to filter
    /// out recent ones, depending on 'outdated_after' configuration.
    pub ignore_verified: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
    pub outdated_after: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// when to schedule this job in calendar event notation
    pub schedule: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    /// on which backup namespace to run the verification recursively
    pub ns: Option<BackupNamespace>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    /// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
    /// snapshots on the same level as the passed `ns`, or the datastore root if none.
    pub max_depth: Option<usize>,
}

impl VerificationJobConfig {
    pub fn acl_path(&self) -> Vec<&str> {
        match self.ns.as_ref() {
            Some(ns) => ns.acl_path(&self.store),
            None => vec!["datastore", &self.store],
        }
    }
}

#[api(
    properties: {
        config: {
            type: VerificationJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Verification Job
pub struct VerificationJobStatus {
    #[serde(flatten)]
    pub config: VerificationJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}

#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        pool: {
            schema: MEDIA_POOL_NAME_SCHEMA,
        },
        drive: {
            schema: DRIVE_NAME_SCHEMA,
        },
        "eject-media": {
            description: "Eject media upon job completion.",
            type: bool,
            optional: true,
        },
        "export-media-set": {
            description: "Export media set upon job completion.",
            type: bool,
            optional: true,
        },
        "latest-only": {
            description: "Backup latest snapshots only.",
            type: bool,
            optional: true,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        "group-filter": {
            schema: GROUP_FILTER_LIST_SCHEMA,
            optional: true,
        },
        ns: {
            type: BackupNamespace,
            optional: true,
        },
        "max-depth": {
            schema: crate::NS_MAX_DEPTH_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job Setup
pub struct TapeBackupJobSetup {
    pub store: String,
    pub pool: String,
    pub drive: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub eject_media: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub export_media_set: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub latest_only: Option<bool>,
    /// Send job email notification to this user
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify_user: Option<Userid>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notification_mode: Option<NotificationMode>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub group_filter: Option<Vec<GroupFilter>>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub ns: Option<BackupNamespace>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub max_depth: Option<usize>,
}

#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        setup: {
            type: TapeBackupJobSetup,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        schedule: {
            optional: true,
            schema: SYNC_SCHEDULE_SCHEMA,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job
pub struct TapeBackupJobConfig {
    #[updater(skip)]
    pub id: String,
    #[serde(flatten)]
    pub setup: TapeBackupJobSetup,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schedule: Option<String>,
}
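
// Illustrative sketch (assumes `serde_json` as a dev-dependency): thanks to
// `#[serde(flatten)]`, the `TapeBackupJobSetup` fields appear directly on the
// job object instead of under a nested `setup` key.
#[cfg(test)]
mod tape_job_flatten_example {
    use super::*;

    #[test]
    fn setup_fields_are_flattened() {
        let job = TapeBackupJobConfig {
            id: "tape-job-1".to_string(),
            setup: TapeBackupJobSetup {
                store: "store1".to_string(),
                pool: "pool1".to_string(),
                drive: "drive0".to_string(),
                eject_media: None,
                export_media_set: None,
                latest_only: None,
                notify_user: None,
                notification_mode: None,
                group_filter: None,
                ns: None,
                max_depth: None,
            },
            comment: None,
            schedule: None,
        };
        let json = serde_json::to_value(&job).unwrap();
        assert_eq!(json["store"], "store1");
        assert!(json.get("setup").is_none());
    }
}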

#[api(
    properties: {
        config: {
            type: TapeBackupJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Tape Backup Job
pub struct TapeBackupJobStatus {
    #[serde(flatten)]
    pub config: TapeBackupJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
    /// Next tape used (best guess)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_media_label: Option<String>,
}

#[derive(Clone, Debug)]
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
pub enum FilterType {
    /// BackupGroup type - either `vm`, `ct`, or `host`.
    BackupType(BackupType),
    /// Full identifier of BackupGroup, including type
    Group(String),
    /// A regular expression matched against the full identifier of the BackupGroup
    Regex(Regex),
}

impl PartialEq for FilterType {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (Self::BackupType(a), Self::BackupType(b)) => a == b,
            (Self::Group(a), Self::Group(b)) => a == b,
            (Self::Regex(a), Self::Regex(b)) => a.as_str() == b.as_str(),
            _ => false,
        }
    }
}

impl std::str::FromStr for FilterType {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s.split_once(':') {
            Some(("group", value)) => BACKUP_GROUP_SCHEMA
                .parse_simple_value(value)
                .map(|_| FilterType::Group(value.to_string()))?,
            Some(("type", value)) => FilterType::BackupType(value.parse()?),
            Some(("regex", value)) => FilterType::Regex(Regex::new(value)?),
            Some((ty, _value)) => bail!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty),
            None => bail!("input doesn't match expected format '<group:GROUP|type:<vm|ct|host>|regex:REGEX>'"),
        })
    }
}
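
// Illustrative sketch: the three accepted prefixes in action. `type:` maps to
// `FilterType::BackupType`, `regex:` compiles the remainder, and an unknown
// prefix is rejected with an error.
#[cfg(test)]
mod filter_type_example {
    use super::*;

    #[test]
    fn parse_filter_prefixes() {
        assert!(matches!(
            "type:vm".parse::<FilterType>(),
            Ok(FilterType::BackupType(_))
        ));
        assert!(matches!(
            "regex:^vm/10[0-9]$".parse::<FilterType>(),
            Ok(FilterType::Regex(_))
        ));
        assert!("bogus:value".parse::<FilterType>().is_err());
    }
}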

// used for serializing below, caution!
impl std::fmt::Display for FilterType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            FilterType::BackupType(backup_type) => write!(f, "type:{}", backup_type),
            FilterType::Group(backup_group) => write!(f, "group:{}", backup_group),
            FilterType::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
        }
    }
}

#[derive(Clone, Debug)]
pub struct GroupFilter {
    pub is_exclude: bool,
    pub filter_type: FilterType,
}

impl PartialEq for GroupFilter {
    fn eq(&self, other: &Self) -> bool {
        self.filter_type == other.filter_type && self.is_exclude == other.is_exclude
    }
}

impl Eq for GroupFilter {}

impl std::str::FromStr for GroupFilter {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let (is_exclude, type_str) = match s.split_once(':') {
            Some(("include", value)) => (false, value),
            Some(("exclude", value)) => (true, value),
            _ => (false, s),
        };

        Ok(GroupFilter {
            is_exclude,
            filter_type: type_str.parse()?,
        })
    }
}

// used for serializing below, caution!
impl std::fmt::Display for GroupFilter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.is_exclude {
            f.write_str("exclude:")?;
        }
        std::fmt::Display::fmt(&self.filter_type, f)
    }
}

proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
proxmox_serde::forward_serialize_to_display!(GroupFilter);
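
// Illustrative sketch: `FromStr` and `Display` are deliberately inverse to
// each other (that is what the forward_* macros above rely on), so a filter
// such as "exclude:type:ct" round-trips unchanged.
#[cfg(test)]
mod group_filter_example {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn exclude_filter_round_trips() {
        let filter = GroupFilter::from_str("exclude:type:ct").unwrap();
        assert!(filter.is_exclude);
        assert_eq!(filter.to_string(), "exclude:type:ct");
    }
}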

fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
    GroupFilter::from_str(input).map(|_| ())
}

pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
    "Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE'). Can be inverted by prepending 'exclude:'.")
    .format(&ApiStringFormat::VerifyFn(verify_group_filter))
    .type_text("[<exclude:|include:>]<type:<vm|ct|host>|group:GROUP|regex:RE>")
    .schema();

pub const GROUP_FILTER_LIST_SCHEMA: Schema =
    ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();

pub const TRANSFER_LAST_SCHEMA: Schema =
    IntegerSchema::new("Limit transfer to last N snapshots (per group), skipping others")
        .minimum(1)
        .schema();

#[api(
    properties: {
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        ns: {
            type: BackupNamespace,
            optional: true,
        },
        "owner": {
            type: Authid,
            optional: true,
        },
        remote: {
            schema: REMOTE_ID_SCHEMA,
            optional: true,
        },
        "remote-store": {
            schema: DATASTORE_SCHEMA,
        },
        "remote-ns": {
            type: BackupNamespace,
            optional: true,
        },
        "remove-vanished": {
            schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
            optional: true,
        },
        "max-depth": {
            schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        limit: {
            type: RateLimitConfig,
        },
        schedule: {
            optional: true,
            schema: SYNC_SCHEDULE_SCHEMA,
        },
        "group-filter": {
            schema: GROUP_FILTER_LIST_SCHEMA,
            optional: true,
        },
        "transfer-last": {
            schema: TRANSFER_LAST_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Sync Job
pub struct SyncJobConfig {
    #[updater(skip)]
    pub id: String,
    pub store: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ns: Option<BackupNamespace>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// None implies local sync.
    pub remote: Option<String>,
    pub remote_store: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub remote_ns: Option<BackupNamespace>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub remove_vanished: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_depth: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schedule: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub group_filter: Option<Vec<GroupFilter>>,
    #[serde(flatten)]
    pub limit: RateLimitConfig,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub transfer_last: Option<usize>,
}

impl SyncJobConfig {
    pub fn acl_path(&self) -> Vec<&str> {
        match self.ns.as_ref() {
            Some(ns) => ns.acl_path(&self.store),
            None => vec!["datastore", &self.store],
        }
    }
}

#[api(
    properties: {
        config: {
            type: SyncJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Sync Job
pub struct SyncJobStatus {
    #[serde(flatten)]
    pub config: SyncJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}

/// These options are sometimes used in the API without `ns`/`max-depth`, specifically in the
/// API call that prunes a single group, where `max-depth` makes no sense.
#[api(
    properties: {
        "keep-last": {
            schema: crate::PRUNE_SCHEMA_KEEP_LAST,
            optional: true,
        },
        "keep-hourly": {
            schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
            optional: true,
        },
        "keep-daily": {
            schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
            optional: true,
        },
        "keep-weekly": {
            schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
            optional: true,
        },
        "keep-monthly": {
            schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
            optional: true,
        },
        "keep-yearly": {
            schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct KeepOptions {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_last: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_hourly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_daily: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_weekly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_monthly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_yearly: Option<u64>,
}

impl KeepOptions {
    pub fn keeps_something(&self) -> bool {
        self.keep_last.unwrap_or(0)
            + self.keep_hourly.unwrap_or(0)
            + self.keep_daily.unwrap_or(0)
            + self.keep_weekly.unwrap_or(0)
            + self.keep_monthly.unwrap_or(0)
            + self.keep_yearly.unwrap_or(0)
            > 0
    }
}
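
// Illustrative sketch: `keeps_something()` sums all keep counts, so a
// default (all-`None`) option set keeps nothing, while any single non-zero
// count is enough to make the job do real work.
#[cfg(test)]
mod keep_options_example {
    use super::*;

    #[test]
    fn default_options_keep_nothing() {
        assert!(!KeepOptions::default().keeps_something());

        let opts = KeepOptions {
            keep_last: Some(3),
            ..Default::default()
        };
        assert!(opts.keeps_something());
    }
}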

#[api(
    properties: {
        keep: {
            type: KeepOptions,
        },
        ns: {
            type: BackupNamespace,
            optional: true,
        },
        "max-depth": {
            schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneJobOptions {
    #[serde(flatten)]
    pub keep: KeepOptions,

    /// The (optional) recursion depth
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_depth: Option<usize>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub ns: Option<BackupNamespace>,
}

impl PruneJobOptions {
    pub fn keeps_something(&self) -> bool {
        self.keep.keeps_something()
    }

    pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
        match &self.ns {
            Some(ns) => ns.acl_path(store),
            None => vec!["datastore", store],
        }
    }
}
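
// Illustrative sketch: without a namespace the ACL check falls back to the
// datastore root; with one, the namespace decides the path (the exact form
// of `ns.acl_path` lives in the `BackupNamespace` impl).
#[cfg(test)]
mod prune_job_options_example {
    use super::*;

    #[test]
    fn acl_path_falls_back_to_datastore_root() {
        let opts = PruneJobOptions::default();
        assert_eq!(opts.acl_path("tank"), vec!["datastore", "tank"]);
    }
}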

#[api(
    properties: {
        disable: {
            type: Boolean,
            optional: true,
            default: false,
        },
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        schedule: {
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        options: {
            type: PruneJobOptions,
        },
    },
)]
#[derive(Deserialize, Serialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Prune configuration.
pub struct PruneJobConfig {
    /// unique ID to address this job
    #[updater(skip)]
    pub id: String,

    pub store: String,

    /// Disable this job.
    #[serde(default, skip_serializing_if = "is_false")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    pub disable: bool,

    pub schedule: String,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    #[serde(flatten)]
    pub options: PruneJobOptions,
}

impl PruneJobConfig {
    pub fn acl_path(&self) -> Vec<&str> {
        self.options.acl_path(&self.store)
    }
}

fn is_false(b: &bool) -> bool {
    !b
}

#[api(
    properties: {
        config: {
            type: PruneJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of prune job
pub struct PruneJobStatus {
    #[serde(flatten)]
    pub config: PruneJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}
@@ -1,55 +0,0 @@

use serde::{Deserialize, Serialize};

use proxmox_schema::api;

use crate::CERT_FINGERPRINT_SHA256_SCHEMA;

#[api(default: "scrypt")]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
    /// Do not encrypt the key.
    None,
    /// Encrypt the key with a password using SCrypt.
    Scrypt,
    /// Encrypt the key with a password using PBKDF2.
    PBKDF2,
}

impl Default for Kdf {
    #[inline]
    fn default() -> Self {
        Kdf::Scrypt
    }
}
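
// Illustrative sketch: the `Default` impl mirrors `#[api(default: "scrypt")]`
// above, so the schema default and the Rust default cannot drift apart
// silently once this is asserted.
#[cfg(test)]
mod kdf_example {
    use super::*;

    #[test]
    fn default_kdf_is_scrypt() {
        assert!(matches!(Kdf::default(), Kdf::Scrypt));
    }
}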

#[api(
    properties: {
        kdf: {
            type: Kdf,
        },
        fingerprint: {
            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Deserialize, Serialize)]
/// Encryption Key Information
pub struct KeyInfo {
    /// Path to key (if stored in a file)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
    pub kdf: Kdf,
    /// Key creation time
    pub created: i64,
    /// Key modification time
    pub modified: i64,
    /// Key fingerprint
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<String>,
    /// Password hint
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hint: Option<String>,
}
@@ -1,208 +0,0 @@

use serde::{Deserialize, Serialize};

use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater};

use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};

#[api()]
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
/// LDAP connection type
pub enum LdapMode {
    /// Plaintext LDAP connection
    #[serde(rename = "ldap")]
    #[default]
    Ldap,
    /// Secure STARTTLS connection
    #[serde(rename = "ldap+starttls")]
    StartTls,
    /// Secure LDAPS connection
    #[serde(rename = "ldaps")]
    Ldaps,
}

#[api(
    properties: {
        "realm": {
            schema: REALM_ID_SCHEMA,
        },
        "comment": {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "verify": {
            optional: true,
            default: false,
        },
        "sync-defaults-options": {
            schema: SYNC_DEFAULTS_STRING_SCHEMA,
            optional: true,
        },
        "sync-attributes": {
            schema: SYNC_ATTRIBUTES_SCHEMA,
            optional: true,
        },
        "user-classes" : {
            optional: true,
            schema: USER_CLASSES_SCHEMA,
        },
        "base-dn" : {
            schema: LDAP_DOMAIN_SCHEMA,
        },
        "bind-dn" : {
            schema: LDAP_DOMAIN_SCHEMA,
            optional: true,
        }
    },
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// LDAP configuration properties.
pub struct LdapRealmConfig {
    #[updater(skip)]
    pub realm: String,
    /// LDAP server address
    pub server1: String,
    /// Fallback LDAP server address
    #[serde(skip_serializing_if = "Option::is_none")]
    pub server2: Option<String>,
    /// Port
    #[serde(skip_serializing_if = "Option::is_none")]
    pub port: Option<u16>,
    /// Base domain name. Users are searched under this domain using a `subtree search`.
    pub base_dn: String,
    /// Username attribute. Used to map a ``userid`` to an LDAP ``dn``.
    pub user_attr: String,
    /// Comment
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Connection security
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<LdapMode>,
    /// Verify server certificate
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify: Option<bool>,
    /// CA certificate to use for the server. The path can point to
    /// either a file or a directory. If it points to a file,
    /// the PEM-formatted X.509 certificate stored at the path
    /// will be added as a trusted certificate.
    /// If the path points to a directory,
    /// the directory replaces the system's default certificate
    /// store at `/etc/ssl/certs` - every file in the directory
    /// will be loaded as a trusted certificate.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub capath: Option<String>,
    /// Bind domain to use for looking up users
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bind_dn: Option<String>,
    /// Custom LDAP search filter for user sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter: Option<String>,
    /// Default options for LDAP sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_defaults_options: Option<String>,
    /// List of attributes to sync from LDAP to user config
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_attributes: Option<String>,
    /// User ``objectClass`` classes to sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_classes: Option<String>,
}

#[api(
    properties: {
        "remove-vanished": {
            optional: true,
            schema: REMOVE_VANISHED_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Default options for LDAP synchronization runs
pub struct SyncDefaultsOptions {
    /// How to handle vanished properties/users
    pub remove_vanished: Option<String>,
    /// Enable new users after sync
    pub enable_new: Option<bool>,
}

#[api()]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// remove-vanished options
pub enum RemoveVanished {
    /// Delete ACLs for vanished users
    Acl,
    /// Remove vanished users
    Entry,
    /// Remove vanished properties from users (e.g. email)
    Properties,
}
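
// Illustrative sketch (assumes `serde_json` as a dev-dependency): the
// kebab-case rename means the wire values are exactly the lowercase names
// used in the `remove-vanished` property string.
#[cfg(test)]
mod remove_vanished_example {
    use super::*;

    #[test]
    fn variants_serialize_as_lowercase() {
        assert_eq!(serde_json::to_value(RemoveVanished::Acl).unwrap(), "acl");
        assert_eq!(
            serde_json::to_value(RemoveVanished::Properties).unwrap(),
            "properties"
        );
    }
}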

pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain").schema();

pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options")
    .format(&ApiStringFormat::PropertyString(
        &SyncDefaultsOptions::API_SCHEMA,
    ))
    .schema();

const REMOVE_VANISHED_DESCRIPTION: &str =
    "A semicolon-separated list of things to remove when they or the user \
    vanishes during user synchronization. The following values are possible: ``entry`` removes the \
    user when not returned from the sync; ``properties`` removes any \
    properties on an existing user that do not appear in the source. \
    ``acl`` removes ACLs when the user is not returned from the sync.";

pub const REMOVE_VANISHED_SCHEMA: Schema = StringSchema::new(REMOVE_VANISHED_DESCRIPTION)
    .format(&ApiStringFormat::PropertyString(&REMOVE_VANISHED_ARRAY))
    .schema();

pub const REMOVE_VANISHED_ARRAY: Schema = ArraySchema::new(
    "Array of remove-vanished options",
    &RemoveVanished::API_SCHEMA,
)
.min_length(1)
.schema();

#[api()]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Determine which LDAP attributes should be synced to which user attributes
pub struct SyncAttributes {
    /// Name of the LDAP attribute containing the user's email address
    pub email: Option<String>,
    /// Name of the LDAP attribute containing the user's first name
    pub firstname: Option<String>,
    /// Name of the LDAP attribute containing the user's last name
    pub lastname: Option<String>,
}

const SYNC_ATTRIBUTES_TEXT: &str = "Comma-separated list of key=value pairs for specifying \
    which LDAP attributes map to which PBS user field. For example, \
    to map the LDAP attribute ``mail`` to PBS's ``email``, write \
    ``email=mail``.";

pub const SYNC_ATTRIBUTES_SCHEMA: Schema = StringSchema::new(SYNC_ATTRIBUTES_TEXT)
    .format(&ApiStringFormat::PropertyString(
        &SyncAttributes::API_SCHEMA,
    ))
    .schema();

pub const USER_CLASSES_ARRAY: Schema = ArraySchema::new(
    "Array of user classes",
    &StringSchema::new("user class").schema(),
)
.min_length(1)
.schema();

const USER_CLASSES_TEXT: &str = "Comma-separated list of allowed objectClass values for \
    user synchronization. For instance, if ``user-classes`` is set to ``person,user``, \
    then user synchronization will consider all LDAP entities \
    where ``objectClass: person`` `or` ``objectClass: user``.";

pub const USER_CLASSES_SCHEMA: Schema = StringSchema::new(USER_CLASSES_TEXT)
    .format(&ApiStringFormat::PropertyString(&USER_CLASSES_ARRAY))
    .default("inetorgperson,posixaccount,person,user")
    .schema();
@@ -1,417 +0,0 @@

//! Basic API types used by most of the PBS code.

use const_format::concatcp;
use serde::{Deserialize, Serialize};

pub mod percent_encoding;

use proxmox_schema::{
    api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema,
};
use proxmox_time::parse_daily_duration;

use proxmox_auth_api::types::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR};

pub use proxmox_schema::api_types::SAFE_ID_FORMAT as PROXMOX_SAFE_ID_FORMAT;
pub use proxmox_schema::api_types::SAFE_ID_REGEX as PROXMOX_SAFE_ID_REGEX;
pub use proxmox_schema::api_types::SAFE_ID_REGEX_STR as PROXMOX_SAFE_ID_REGEX_STR;
pub use proxmox_schema::api_types::{
    BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX,
};
pub use proxmox_schema::api_types::{DNS_ALIAS_REGEX, DNS_NAME_OR_IP_REGEX, DNS_NAME_REGEX};
pub use proxmox_schema::api_types::{FINGERPRINT_SHA256_REGEX, SHA256_HEX_REGEX};
pub use proxmox_schema::api_types::{
    GENERIC_URI_REGEX, HOSTNAME_REGEX, HOST_PORT_REGEX, HTTP_URL_REGEX,
};
pub use proxmox_schema::api_types::{MULTI_LINE_COMMENT_REGEX, SINGLE_LINE_COMMENT_REGEX};
pub use proxmox_schema::api_types::{PASSWORD_REGEX, SYSTEMD_DATETIME_REGEX, UUID_REGEX};

pub use proxmox_schema::api_types::{CIDR_FORMAT, CIDR_REGEX};
pub use proxmox_schema::api_types::{CIDR_V4_FORMAT, CIDR_V4_REGEX};
pub use proxmox_schema::api_types::{CIDR_V6_FORMAT, CIDR_V6_REGEX};
pub use proxmox_schema::api_types::{IPRE_STR, IP_FORMAT, IP_REGEX};
pub use proxmox_schema::api_types::{IPV4RE_STR, IP_V4_FORMAT, IP_V4_REGEX};
pub use proxmox_schema::api_types::{IPV6RE_STR, IP_V6_FORMAT, IP_V6_REGEX};

pub use proxmox_schema::api_types::COMMENT_SCHEMA as SINGLE_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::HOSTNAME_SCHEMA;
pub use proxmox_schema::api_types::HOST_PORT_SCHEMA;
pub use proxmox_schema::api_types::HTTP_URL_SCHEMA;
pub use proxmox_schema::api_types::MULTI_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::NODE_SCHEMA;
pub use proxmox_schema::api_types::SINGLE_LINE_COMMENT_FORMAT;
pub use proxmox_schema::api_types::{
    BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
};
pub use proxmox_schema::api_types::{CERT_FINGERPRINT_SHA256_SCHEMA, FINGERPRINT_SHA256_FORMAT};
pub use proxmox_schema::api_types::{DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA};
pub use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, DNS_NAME_OR_IP_SCHEMA};
pub use proxmox_schema::api_types::{PASSWORD_FORMAT, PASSWORD_SCHEMA};
pub use proxmox_schema::api_types::{SERVICE_ID_SCHEMA, UUID_FORMAT};
pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA};

use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR};

#[rustfmt::skip]
pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*";

#[rustfmt::skip]
pub const BACKUP_TYPE_RE: &str = r"(?:host|vm|ct)";

#[rustfmt::skip]
pub const BACKUP_TIME_RE: &str = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z";

#[rustfmt::skip]
pub const BACKUP_NS_RE: &str =
    concatcp!("(?:",
        "(?:", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR,
    ")?");

#[rustfmt::skip]
pub const BACKUP_NS_PATH_RE: &str =
    concatcp!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/");

#[rustfmt::skip]
pub const SNAPSHOT_PATH_REGEX_STR: &str =
    concatcp!(
        r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")/(", BACKUP_TIME_RE, r")",
    );

#[rustfmt::skip]
pub const GROUP_OR_SNAPSHOT_PATH_REGEX_STR: &str =
    concatcp!(
        r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")(?:/(", BACKUP_TIME_RE, r"))?",
    );

mod acl;
pub use acl::*;

mod datastore;
pub use datastore::*;

mod jobs;
pub use jobs::*;

mod key_derivation;
pub use key_derivation::{Kdf, KeyInfo};

mod maintenance;
pub use maintenance::*;

mod network;
pub use network::*;

mod node;
pub use node::*;

pub use proxmox_auth_api::types as userid;
pub use proxmox_auth_api::types::{Authid, Userid};
pub use proxmox_auth_api::types::{Realm, RealmRef};
pub use proxmox_auth_api::types::{Tokenname, TokennameRef};
pub use proxmox_auth_api::types::{Username, UsernameRef};
pub use proxmox_auth_api::types::{
    PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA,
};

#[macro_use]
mod user;
pub use user::*;

pub use proxmox_schema::upid::*;

mod crypto;
pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint};

pub mod file_restore;

mod openid;
pub use openid::*;

mod ldap;
pub use ldap::*;

mod ad;
pub use ad::*;

mod remote;
pub use remote::*;

mod tape;
pub use tape::*;

mod traffic_control;
pub use traffic_control::*;

mod zfs;
pub use zfs::*;

mod metrics;
pub use metrics::*;

const_regex! {
    // just a rough check - dummy acceptor is used before persisting
    pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$";

    pub BACKUP_REPO_URL_REGEX = concatcp!(
        r"^^(?:(?:(",
        USER_ID_REGEX_STR, "|", APITOKEN_ID_REGEX_STR,
        ")@)?(",
        DNS_NAME_STR, "|", IPRE_BRACKET_STR,
        "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR, r")$"
    );

    pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
}

pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);

pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);

pub const DAILY_DURATION_FORMAT: ApiStringFormat =
    ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));

pub const SEARCH_DOMAIN_SCHEMA: Schema =
    StringSchema::new("Search domain for host-name lookup.").schema();

pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.")
    .format(&IP_FORMAT)
    .schema();

pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.")
    .format(&IP_FORMAT)
    .schema();

pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.")
    .format(&IP_FORMAT)
    .schema();

pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema =
    StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2")
        .format(&OPENSSL_CIPHERS_TLS_FORMAT)
        .schema();

pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema =
    StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3")
        .format(&OPENSSL_CIPHERS_TLS_FORMAT)
        .schema();

pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
    .format(&PASSWORD_FORMAT)
    .min_length(5)
    .max_length(64)
    .schema();

pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const SUBSCRIPTION_KEY_SCHEMA: Schema =
    StringSchema::new("Proxmox Backup Server subscription key.")
        .format(&SUBSCRIPTION_KEY_FORMAT)
        .min_length(15)
        .max_length(16)
        .schema();

pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
    "Prevent changes if current configuration file has different \
    SHA256 digest. This can be used to prevent concurrent \
    modifications.",
)
.format(&PVE_CONFIG_DIGEST_FORMAT)
.schema();

/// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);

// Complex type definitions

#[api()]
#[derive(Default, Serialize, Deserialize)]
/// Storage space usage information.
pub struct StorageStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    pub avail: u64,
}

pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(1)
    .max_length(64)
    .schema();

#[api()]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
/// Describes a package for which an update is available.
pub struct APTUpdateInfo {
    /// Package name
    pub package: String,
    /// Package title
    pub title: String,
    /// Package architecture
    pub arch: String,
    /// Human-readable package description
    pub description: String,
    /// New version to be updated to
    pub version: String,
    /// Old version currently installed
    pub old_version: String,
    /// Package origin
    pub origin: String,
    /// Package priority in human-readable form
    pub priority: String,
    /// Package section
    pub section: String,
    /// Custom extra field for additional package information
    #[serde(skip_serializing_if = "Option::is_none")]
    pub extra_info: Option<String>,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Node Power command type.
pub enum NodePowerCommand {
    /// Restart the server
    Reboot,
    /// Shutdown the server
    Shutdown,
}

#[api()]
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum TaskStateType {
    /// Ok
    OK,
    /// Warning
    Warning,
    /// Error
    Error,
    /// Unknown
    Unknown,
}

#[api(
    properties: {
        upid: { schema: UPID::API_SCHEMA },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Task properties.
pub struct TaskListItem {
    pub upid: String,
    /// The node name the task is running on.
    pub node: String,
    /// The Unix PID
    pub pid: i64,
    /// The task start time (Epoch)
    pub pstart: u64,
    /// The task start time (Epoch)
    pub starttime: i64,
    /// Worker type (arbitrary ASCII string)
    pub worker_type: String,
    /// Worker ID (arbitrary ASCII string)
    pub worker_id: Option<String>,
    /// The authenticated entity who started the task
    pub user: String,
    /// The task end time (Epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub endtime: Option<i64>,
    /// Task end status
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}

pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(),
};

#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
/// RRD consolidation mode
pub enum RRDMode {
    /// Maximum
    Max,
    /// Average
    Average,
}

#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// RRD time frame
pub enum RRDTimeFrame {
    /// Hour
    Hour,
    /// Day
    Day,
    /// Week
    Week,
    /// Month
    Month,
    /// Year
    Year,
    /// Decade (10 years)
    Decade,
}

#[api]
#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
/// type of the realm
pub enum RealmType {
    /// The PAM realm
    Pam,
    /// The PBS realm
    Pbs,
    /// An OpenID Connect realm
    OpenId,
    /// An LDAP realm
    Ldap,
    /// An Active Directory (AD) realm
    Ad,
}

serde_plain::derive_display_from_serialize!(RealmType);
serde_plain::derive_fromstr_from_deserialize!(RealmType);
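
// Illustrative sketch: `serde_plain` derives `Display`/`FromStr` from the
// serde representation, so the lowercase names round-trip as plain strings.
#[cfg(test)]
mod realm_type_example {
    use super::*;

    #[test]
    fn realm_type_string_round_trip() {
        assert_eq!(RealmType::OpenId.to_string(), "openid");
        assert_eq!("ldap".parse::<RealmType>().unwrap(), RealmType::Ldap);
    }
}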

#[api(
    properties: {
        realm: {
            schema: REALM_ID_SCHEMA,
        },
        "type": {
            type: RealmType,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Deserialize, Serialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic Information about a realm
pub struct BasicRealmInfo {
    pub realm: String,
    #[serde(rename = "type")]
    pub ty: RealmType,
    /// True if it is the default realm
    #[serde(skip_serializing_if = "Option::is_none")]
    pub default: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
@@ -1,106 +0,0 @@

use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;

use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};

const_regex! {
    pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
}

pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);

pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
    StringSchema::new("Message describing the reason for the maintenance.")
        .format(&MAINTENANCE_MESSAGE_FORMAT)
        .max_length(64)
        .schema();

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Operation requirements, used when checking for maintenance mode.
pub enum Operation {
    /// for any read operation like backup restore or RRD metric collection
    Read,
    /// for any write/delete operation, like backup create or GC
    Write,
    /// for any purely logical operation on the in-memory state of the datastore, e.g., to check if
    /// some mutex could be locked (e.g., GC already running?)
    ///
    /// NOTE: one must *not* do any IO operations when only holding this Op state
    Lookup,
    // GarbageCollect or Delete?
}

#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// Maintenance type.
pub enum MaintenanceType {
    // TODO:
    //  - Add "unmounting" once we have pluggable datastores
    //  - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
    //    operation, so that one can enable a mode where nothing new can be added but stuff can be
    //    cleaned
    /// Only read operations are allowed on the datastore.
    ReadOnly,
    /// Neither read nor write operations are allowed on the datastore.
    Offline,
    /// The datastore is being deleted.
    Delete,
}
serde_plain::derive_display_from_serialize!(MaintenanceType);
serde_plain::derive_fromstr_from_deserialize!(MaintenanceType);

#[api(
    properties: {
        type: {
            type: MaintenanceType,
        },
        message: {
            optional: true,
            schema: MAINTENANCE_MESSAGE_SCHEMA,
        }
    },
    default_key: "type",
)]
#[derive(Deserialize, Serialize)]
/// Maintenance mode
pub struct MaintenanceMode {
    /// Type of maintenance ("read-only" or "offline").
    #[serde(rename = "type")]
    pub ty: MaintenanceType,

    /// Reason for maintenance.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}

impl MaintenanceMode {
    /// Used for deciding whether the datastore is cleared from the internal cache after the last
    /// task finishes, so all open files are closed.
    pub fn is_offline(&self) -> bool {
        self.ty == MaintenanceType::Offline
    }

    pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
        if self.ty == MaintenanceType::Delete {
            bail!("datastore is being deleted");
        }

        let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or(""))
            .decode_utf8()
            .unwrap_or(Cow::Borrowed(""));

        if let Some(Operation::Lookup) = operation {
            return Ok(());
        } else if self.ty == MaintenanceType::Offline {
            bail!("offline maintenance mode: {}", message);
        } else if self.ty == MaintenanceType::ReadOnly {
            if let Some(Operation::Write) = operation {
                bail!("read-only maintenance mode: {}", message);
            }
        }
        Ok(())
    }
}
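
// Illustrative sketch: `Lookup` is always allowed (it must not do IO),
// read-only mode only rejects writes, and delete mode rejects everything.
#[cfg(test)]
mod maintenance_mode_example {
    use super::*;

    #[test]
    fn read_only_blocks_writes_only() {
        let mode = MaintenanceMode {
            ty: MaintenanceType::ReadOnly,
            message: None,
        };
        assert!(mode.check(Some(Operation::Lookup)).is_ok());
        assert!(mode.check(Some(Operation::Read)).is_ok());
        assert!(mode.check(Some(Operation::Write)).is_err());
    }
}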
@@ -1,189 +0,0 @@

use serde::{Deserialize, Serialize};

use crate::{
    HOST_PORT_SCHEMA, HTTP_URL_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
use proxmox_schema::{api, Schema, StringSchema, Updater};

pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.")
    .min_length(3)
    .max_length(32)
    .default("proxmox")
    .schema();

pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.")
    .min_length(3)
    .max_length(32)
    .default("proxmox")
    .schema();

fn return_true() -> bool {
    true
}

fn is_true(b: &bool) -> bool {
    *b
}

#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        enable: {
            type: bool,
            optional: true,
            default: true,
        },
        host: {
            schema: HOST_PORT_SCHEMA,
        },
        mtu: {
            type: u16,
            optional: true,
            default: 1500,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (UDP)
pub struct InfluxDbUdp {
    #[updater(skip)]
    pub name: String,
    #[serde(default = "return_true", skip_serializing_if = "is_true")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    /// Enables or disables the metrics server
    pub enable: bool,
    /// the host + port
    pub host: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The MTU
    pub mtu: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
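
// Illustrative sketch (assumes `serde_json` as a dev-dependency): `enable`
// defaults to true when absent from the input and, being true, is skipped
// again on output via `skip_serializing_if = "is_true"`.
#[cfg(test)]
mod influxdb_udp_example {
    use super::*;

    #[test]
    fn enable_defaults_to_true_and_is_omitted() {
        let cfg: InfluxDbUdp = serde_json::from_value(serde_json::json!({
            "name": "metrics1",
            "host": "127.0.0.1:8089",
        }))
        .unwrap();
        assert!(cfg.enable);

        let json = serde_json::to_value(&cfg).unwrap();
        assert!(json.get("enable").is_none());
    }
}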

#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        enable: {
            type: bool,
            optional: true,
            default: true,
        },
        url: {
            schema: HTTP_URL_SCHEMA,
        },
        token: {
            type: String,
            optional: true,
        },
        bucket: {
            schema: INFLUXDB_BUCKET_SCHEMA,
            optional: true,
        },
        organization: {
            schema: INFLUXDB_ORGANIZATION_SCHEMA,
            optional: true,
        },
        "max-body-size": {
            type: usize,
            optional: true,
            default: 25_000_000,
        },
        "verify-tls": {
            type: bool,
            optional: true,
            default: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (HTTP(s))
pub struct InfluxDbHttp {
    #[updater(skip)]
    pub name: String,
    #[serde(default = "return_true", skip_serializing_if = "is_true")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    /// Enables or disables the metrics server
    pub enable: bool,
    /// The base URL of the InfluxDB server
    pub url: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) API token
    pub token: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Named location where time series data is stored
    pub bucket: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Workspace for a group of users
    pub organization: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) maximum body size
    pub max_body_size: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// If true, the certificate will be validated.
    pub verify_tls: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
/// Type of the metric server
pub enum MetricServerType {
    /// InfluxDB HTTP
    #[serde(rename = "influxdb-http")]
    InfluxDbHttp,
    /// InfluxDB UDP
    #[serde(rename = "influxdb-udp")]
    InfluxDbUdp,
}

#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        "type": {
            type: MetricServerType,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a metric server that's available for all types
pub struct MetricServerInfo {
    pub name: String,
    #[serde(rename = "type")]
    pub ty: MetricServerType,
    /// Enables or disables the metrics server
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    /// The target server
    pub server: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
@@ -1,345 +0,0 @@

use std::fmt;

use serde::{Deserialize, Serialize};

use proxmox_schema::*;

use crate::{
    CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT,
    PROXMOX_SAFE_ID_REGEX,
};

pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.")
    .format(&IP_V4_FORMAT)
    .max_length(15)
    .schema();

pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.")
    .format(&IP_V6_FORMAT)
    .max_length(39)
    .schema();

pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.")
    .format(&IP_FORMAT)
    .max_length(39)
    .schema();

pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).")
    .format(&CIDR_V4_FORMAT)
    .max_length(18)
    .schema();

pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).")
    .format(&CIDR_V6_FORMAT)
    .max_length(43)
    .schema();

pub const CIDR_SCHEMA: Schema =
    StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
        .format(&CIDR_FORMAT)
        .max_length(43)
        .schema();

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
    /// Configuration is done manually using other tools
    Manual,
    /// Define interfaces with statically allocated addresses.
    Static,
    /// Obtain an address via DHCP
    DHCP,
    /// Define the loopback interface.
    Loopback,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
    /// Round-robin policy
    BalanceRr = 0,
    /// Active-backup policy
    ActiveBackup = 1,
    /// XOR policy
    BalanceXor = 2,
    /// Broadcast policy
    Broadcast = 3,
    /// IEEE 802.3ad Dynamic link aggregation
    #[serde(rename = "802.3ad")]
    Ieee802_3ad = 4,
    /// Adaptive transmit load balancing
    BalanceTlb = 5,
    /// Adaptive load balancing
    BalanceAlb = 6,
}

impl fmt::Display for LinuxBondMode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            LinuxBondMode::BalanceRr => "balance-rr",
            LinuxBondMode::ActiveBackup => "active-backup",
            LinuxBondMode::BalanceXor => "balance-xor",
            LinuxBondMode::Broadcast => "broadcast",
            LinuxBondMode::Ieee802_3ad => "802.3ad",
            LinuxBondMode::BalanceTlb => "balance-tlb",
            LinuxBondMode::BalanceAlb => "balance-alb",
        })
    }
}
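
// Illustrative sketch (assumes `serde_json` as a dev-dependency): the serde
// names and the `Display` strings above are kept in sync by hand; asserting
// one pair guards against the two drifting apart.
#[cfg(test)]
mod bond_mode_example {
    use super::*;

    #[test]
    fn serde_name_matches_display() {
        assert_eq!(
            serde_json::to_value(LinuxBondMode::Ieee802_3ad).unwrap(),
            "802.3ad"
        );
        assert_eq!(LinuxBondMode::Ieee802_3ad.to_string(), "802.3ad");
    }
}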
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[repr(u8)]
|
||||
/// Bond Transmit Hash Policy for LACP (802.3ad)
|
||||
pub enum BondXmitHashPolicy {
|
||||
/// Layer 2
|
||||
Layer2 = 0,
|
||||
/// Layer 2+3
|
||||
#[serde(rename = "layer2+3")]
|
||||
Layer2_3 = 1,
|
||||
/// Layer 3+4
|
||||
#[serde(rename = "layer3+4")]
|
||||
Layer3_4 = 2,
|
||||
}
|
||||
|
||||
impl fmt::Display for BondXmitHashPolicy {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.write_str(match self {
|
||||
BondXmitHashPolicy::Layer2 => "layer2",
|
||||
BondXmitHashPolicy::Layer2_3 => "layer2+3",
|
||||
BondXmitHashPolicy::Layer3_4 => "layer3+4",
|
||||
})
|
||||
}
|
||||
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
    /// Loopback
    Loopback,
    /// Physical Ethernet device
    Eth,
    /// Linux Bridge
    Bridge,
    /// Linux Bond
    Bond,
    /// Linux VLAN (eth.10)
    Vlan,
    /// Interface Alias (eth:1)
    Alias,
    /// Unknown interface type
    Unknown,
}

pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
    .format(&NETWORK_INTERFACE_FORMAT)
    .min_length(1)
    .max_length(15) // libc::IFNAMSIZ-1
    .schema();

pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema();

pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
    StringSchema::new("A list of network devices, comma separated.")
        .format(&ApiStringFormat::PropertyString(
            &NETWORK_INTERFACE_ARRAY_SCHEMA,
        ))
        .schema();

#[api(
    properties: {
        name: {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
        },
        "type": {
            type: NetworkInterfaceType,
        },
        method: {
            type: NetworkConfigMethod,
            optional: true,
        },
        method6: {
            type: NetworkConfigMethod,
            optional: true,
        },
        cidr: {
            schema: CIDR_V4_SCHEMA,
            optional: true,
        },
        cidr6: {
            schema: CIDR_V6_SCHEMA,
            optional: true,
        },
        gateway: {
            schema: IP_V4_SCHEMA,
            optional: true,
        },
        gateway6: {
            schema: IP_V6_SCHEMA,
            optional: true,
        },
        options: {
            description: "Option list (inet)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        options6: {
            description: "Option list (inet6)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        comments: {
            description: "Comments (inet, may span multiple lines)",
            type: String,
            optional: true,
        },
        comments6: {
            description: "Comments (inet6, may span multiple lines)",
            type: String,
            optional: true,
        },
        bridge_ports: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        slaves: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        "vlan-id": {
            description: "VLAN ID.",
            type: u16,
            optional: true,
        },
        "vlan-raw-device": {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
            optional: true,
        },
        bond_mode: {
            type: LinuxBondMode,
            optional: true,
        },
        "bond-primary": {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
            optional: true,
        },
        bond_xmit_hash_policy: {
            type: BondXmitHashPolicy,
            optional: true,
        },
    }
)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
/// Network Interface configuration
pub struct Interface {
    /// Autostart interface
    #[serde(rename = "autostart")]
    pub autostart: bool,
    /// Interface is active (UP)
    pub active: bool,
    /// Interface name
    pub name: String,
    /// Interface type
    #[serde(rename = "type")]
    pub interface_type: NetworkInterfaceType,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub method: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub method6: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv4 address with netmask
    pub cidr: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv4 gateway
    pub gateway: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv6 address with netmask
    pub cidr6: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv6 gateway
    pub gateway6: Option<String>,

    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub options: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub options6: Vec<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comments: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comments6: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    /// Maximum Transmission Unit
    pub mtu: Option<u64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_ports: Option<Vec<String>>,
    /// Enable bridge vlan support.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_vlan_aware: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-id")]
    pub vlan_id: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-raw-device")]
    pub vlan_raw_device: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub slaves: Option<Vec<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bond_mode: Option<LinuxBondMode>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "bond-primary")]
    pub bond_primary: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}

impl Interface {
    pub fn new(name: String) -> Self {
        Self {
            name,
            interface_type: NetworkInterfaceType::Unknown,
            autostart: false,
            active: false,
            method: None,
            method6: None,
            cidr: None,
            gateway: None,
            cidr6: None,
            gateway6: None,
            options: Vec::new(),
            options6: Vec::new(),
            comments: None,
            comments6: None,
            mtu: None,
            bridge_ports: None,
            bridge_vlan_aware: None,
            vlan_id: None,
            vlan_raw_device: None,
            slaves: None,
            bond_mode: None,
            bond_primary: None,
            bond_xmit_hash_policy: None,
        }
    }
}
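
// Interface::new() yields an inactive placeholder that parsers fill in field
// by field. A minimal usage sketch:
#[cfg(test)]
mod interface_tests {
    use super::*;

    #[test]
    fn new_interface_defaults() {
        let mut iface = Interface::new("eth0".to_string());
        assert_eq!(iface.interface_type, NetworkInterfaceType::Unknown);
        assert!(!iface.active);
        // a parser would then classify and configure the interface:
        iface.interface_type = NetworkInterfaceType::Eth;
        iface.cidr = Some("192.0.2.10/24".to_string());
        assert_eq!(iface.name, "eth0");
    }
}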

@ -1,162 +0,0 @@

use std::ffi::OsStr;

use proxmox_schema::*;
use serde::{Deserialize, Serialize};

use crate::StorageStatus;

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node memory usage counters
pub struct NodeMemoryCounters {
    /// Total memory
    pub total: u64,
    /// Used memory
    pub used: u64,
    /// Free memory
    pub free: u64,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node swap usage counters
pub struct NodeSwapCounters {
    /// Total swap
    pub total: u64,
    /// Used swap
    pub used: u64,
    /// Free swap
    pub free: u64,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint
pub struct NodeInformation {
    /// The SSL Fingerprint
    pub fingerprint: String,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
/// The current kernel version (output of `uname`)
pub struct KernelVersionInformation {
    /// The system name/nodename
    pub sysname: String,
    /// The kernel release number
    pub release: String,
    /// The kernel version
    pub version: String,
    /// The machine architecture
    pub machine: String,
}

impl KernelVersionInformation {
    pub fn from_uname_parts(
        sysname: &OsStr,
        release: &OsStr,
        version: &OsStr,
        machine: &OsStr,
    ) -> Self {
        KernelVersionInformation {
            sysname: sysname.to_str().map(String::from).unwrap_or_default(),
            release: release.to_str().map(String::from).unwrap_or_default(),
            version: version.to_str().map(String::from).unwrap_or_default(),
            machine: machine.to_str().map(String::from).unwrap_or_default(),
        }
    }

    pub fn get_legacy(&self) -> String {
        format!("{} {} {}", self.sysname, self.release, self.version)
    }
}
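
// from_uname_parts() silently maps any non-UTF-8 field to an empty string via
// unwrap_or_default(). A small sketch of the conversion and the legacy format
// (the version strings below are illustrative values only):
#[cfg(test)]
mod kernel_version_tests {
    use super::*;
    use std::ffi::OsStr;

    #[test]
    fn uname_parts_roundtrip() {
        let info = KernelVersionInformation::from_uname_parts(
            OsStr::new("Linux"),
            OsStr::new("6.8.12-1-pve"),
            OsStr::new("#1 SMP"),
            OsStr::new("x86_64"),
        );
        assert_eq!(info.get_legacy(), "Linux 6.8.12-1-pve #1 SMP");
    }
}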

#[api]
#[derive(Serialize, Deserialize, Copy, Clone)]
#[serde(rename_all = "kebab-case")]
/// The possible BootModes
pub enum BootMode {
    /// The BootMode is EFI/UEFI
    Efi,
    /// The BootMode is Legacy BIOS
    LegacyBios,
}

#[api]
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
/// Holds the boot mode and SecureBoot status
pub struct BootModeInformation {
    /// The BootMode, either Efi or Bios
    pub mode: BootMode,
    /// SecureBoot status
    pub secureboot: bool,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Information about the CPU
pub struct NodeCpuInformation {
    /// The CPU model
    pub model: String,
    /// The number of CPU sockets
    pub sockets: usize,
    /// The number of CPU cores (incl. threads)
    pub cpus: usize,
}

#[api(
    properties: {
        memory: {
            type: NodeMemoryCounters,
        },
        root: {
            type: StorageStatus,
        },
        swap: {
            type: NodeSwapCounters,
        },
        loadavg: {
            type: Array,
            items: {
                type: Number,
                description: "the load",
            }
        },
        cpuinfo: {
            type: NodeCpuInformation,
        },
        info: {
            type: NodeInformation,
        }
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// The Node status
pub struct NodeStatus {
    pub memory: NodeMemoryCounters,
    pub root: StorageStatus,
    pub swap: NodeSwapCounters,
    /// The current uptime of the server.
    pub uptime: u64,
    /// Load for 1, 5 and 15 minutes.
    pub loadavg: [f64; 3],
    /// The current kernel version (NEW struct type).
    pub current_kernel: KernelVersionInformation,
    /// The current kernel version (LEGACY string type).
    pub kversion: String,
    /// Total CPU usage since last query.
    pub cpu: f64,
    /// Total IO wait since last query.
    pub wait: f64,
    pub cpuinfo: NodeCpuInformation,
    pub info: NodeInformation,
    /// Current boot mode
    pub boot_info: BootModeInformation,
}

@ -1,120 +0,0 @@

use serde::{Deserialize, Serialize};

use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater};

use super::{
    GENERIC_URI_REGEX, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA,
    SINGLE_LINE_COMMENT_SCHEMA,
};

pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
    .format(&OPENID_SCOPE_FORMAT)
    .schema();

pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();

pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
    ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);

pub const OPENID_DEFAULT_SCOPE_LIST: &str = "email profile";

pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
    .format(&OPENID_SCOPE_LIST_FORMAT)
    .default(OPENID_DEFAULT_SCOPE_LIST)
    .schema();

pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GENERIC_URI_REGEX);

pub const OPENID_ACR_SCHEMA: Schema =
    StringSchema::new("OpenID Authentication Context Class Reference.")
        .format(&OPENID_ACR_FORMAT)
        .schema();

pub const OPENID_ACR_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();

pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
    ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);

pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
    .format(&OPENID_ACR_LIST_FORMAT)
    .schema();

pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
    "Use the value of this attribute/claim as unique user name. It \
    is up to the identity provider to guarantee the uniqueness. The \
    OpenID specification only guarantees that Subject ('sub') is \
    unique. Also make sure that the user is not allowed to change that \
    attribute themselves!",
)
.max_length(64)
.min_length(1)
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();

#[api(
    properties: {
        realm: {
            schema: REALM_ID_SCHEMA,
        },
        "client-key": {
            optional: true,
        },
        "scopes": {
            schema: OPENID_SCOPE_LIST_SCHEMA,
            optional: true,
        },
        "acr-values": {
            schema: OPENID_ACR_LIST_SCHEMA,
            optional: true,
        },
        prompt: {
            description: "OpenID Prompt",
            type: String,
            format: &PROXMOX_SAFE_ID_FORMAT,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        autocreate: {
            optional: true,
            default: false,
        },
        "username-claim": {
            schema: OPENID_USERNAME_CLAIM_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
    #[updater(skip)]
    pub realm: String,
    /// OpenID Issuer Url
    pub issuer_url: String,
    /// OpenID Client ID
    pub client_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub scopes: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acr_values: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<String>,
    /// OpenID Client Key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub client_key: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Automatically create users if they do not exist.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub autocreate: Option<bool>,
    #[updater(skip)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub username_claim: Option<String>,
}

@ -1,22 +0,0 @@

use percent_encoding::{utf8_percent_encode, AsciiSet};

/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}`
pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0x00..=0x1f and 0x7f
    // The old SIMPLE_ENCODE_SET additionally contained space and DEL (0x7f,
    // already part of CONTROLS; the extra .add is harmless)
    .add(0x20)
    .add(0x7f)
    // the DEFAULT_ENCODE_SET added:
    .add(b' ')
    .add(b'"')
    .add(b'#')
    .add(b'<')
    .add(b'>')
    .add(b'`')
    .add(b'?')
    .add(b'{')
    .add(b'}');

/// percent encode a url component
pub fn percent_encode_component(comp: &str) -> String {
    utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
}
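
// percent_encode_component() deliberately uses the very strict NON_ALPHANUMERIC
// set rather than DEFAULT_ENCODE_SET above, so every non-alphanumeric byte is
// escaped. A quick sketch of the resulting encoding:
#[cfg(test)]
mod percent_encode_tests {
    use super::*;

    #[test]
    fn component_encoding() {
        assert_eq!(percent_encode_component("a/b c"), "a%2Fb%20c");
        // alphanumerics pass through unchanged
        assert_eq!(percent_encode_component("abc123"), "abc123");
    }
}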

@ -1,106 +0,0 @@

use serde::{Deserialize, Serialize};

use super::*;
use proxmox_schema::*;

pub const REMOTE_PASSWORD_SCHEMA: Schema =
    StringSchema::new("Password or auth token for remote host.")
        .format(&PASSWORD_FORMAT)
        .min_length(1)
        .max_length(1024)
        .schema();

pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema =
    StringSchema::new("Password or auth token for remote host (stored as base64 string).")
        .format(&PASSWORD_FORMAT)
        .min_length(1)
        .max_length(1024)
        .schema();

pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

#[api(
    properties: {
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        host: {
            schema: DNS_NAME_OR_IP_SCHEMA,
        },
        port: {
            optional: true,
            description: "The (optional) port",
            type: u16,
        },
        "auth-id": {
            type: Authid,
        },
        fingerprint: {
            optional: true,
            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote configuration properties.
pub struct RemoteConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    pub host: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub port: Option<u16>,
    pub auth_id: Authid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<String>,
}

#[api(
    properties: {
        name: {
            schema: REMOTE_ID_SCHEMA,
        },
        config: {
            type: RemoteConfig,
        },
        password: {
            schema: REMOTE_PASSWORD_BASE64_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct Remote {
    pub name: String,
    // Note: The stored password is base64 encoded
    #[serde(default, skip_serializing_if = "String::is_empty")]
    #[serde(with = "proxmox_serde::string_as_base64")]
    pub password: String,
    #[serde(flatten)]
    pub config: RemoteConfig,
}

#[api(
    properties: {
        name: {
            schema: REMOTE_ID_SCHEMA,
        },
        config: {
            type: RemoteConfig,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote properties (without password).
pub struct RemoteWithoutPassword {
    pub name: String,
    #[serde(flatten)]
    pub config: RemoteConfig,
}

@ -1,134 +0,0 @@

//! Types for tape changer API

use serde::{Deserialize, Serialize};

use proxmox_schema::{
    api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater,
};

use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT};

pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const SCSI_CHANGER_PATH_SCHEMA: Schema =
    StringSchema::new("Path to Linux generic SCSI device (e.g. '/dev/sg4')").schema();

pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Slot list.",
    &IntegerSchema::new("Slot number").minimum(1).schema(),
)
.schema();

pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(
    "\
A list of slot numbers, comma separated. Those slots are reserved for
Import/Export, i.e. any media in those slots are considered to be
'offline'.
",
)
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
.schema();

#[api(
    properties: {
        name: {
            schema: CHANGER_NAME_SCHEMA,
        },
        path: {
            schema: SCSI_CHANGER_PATH_SCHEMA,
        },
        "export-slots": {
            schema: EXPORT_SLOT_LIST_SCHEMA,
            optional: true,
        },
        "eject-before-unload": {
            optional: true,
            default: false,
        }
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// SCSI tape changer
pub struct ScsiTapeChanger {
    #[updater(skip)]
    pub name: String,
    pub path: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub export_slots: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// If set to true, tapes are ejected manually before unloading
    pub eject_before_unload: Option<bool>,
}

#[api(
    properties: {
        config: {
            type: ScsiTapeChanger,
        },
        info: {
            type: OptionalDeviceIdentification,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Changer config with optional device identification attributes
pub struct ChangerListEntry {
    #[serde(flatten)]
    pub config: ScsiTapeChanger,
    #[serde(flatten)]
    pub info: OptionalDeviceIdentification,
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Entry Kind
pub enum MtxEntryKind {
    /// Drive
    Drive,
    /// Slot
    Slot,
    /// Import/Export Slot
    ImportExport,
}

#[api(
    properties: {
        "entry-kind": {
            type: MtxEntryKind,
        },
        "label-text": {
            schema: MEDIA_LABEL_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Status Entry
pub struct MtxStatusEntry {
    pub entry_kind: MtxEntryKind,
    /// The ID of the slot or drive
    pub entry_id: u64,
    /// The media label (volume tag) if the slot/drive is full
    #[serde(skip_serializing_if = "Option::is_none")]
    pub label_text: Option<String>,
    /// The slot the drive was loaded from
    #[serde(skip_serializing_if = "Option::is_none")]
    pub loaded_slot: Option<u64>,
    /// The current state of the drive
    #[serde(skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
}

@ -1,55 +0,0 @@

use ::serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Optional Device Identification Attributes
pub struct OptionalDeviceIdentification {
    /// Vendor (autodetected)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vendor: Option<String>,
    /// Model (autodetected)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Serial number (autodetected)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub serial: Option<String>,
}

#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Kind of device
pub enum DeviceKind {
    /// Tape changer (Autoloader, Robot)
    Changer,
    /// Normal SCSI tape device
    Tape,
}

#[api(
    properties: {
        kind: {
            type: DeviceKind,
        },
    },
)]
#[derive(Debug, Serialize, Deserialize)]
/// Tape device information
pub struct TapeDeviceInfo {
    pub kind: DeviceKind,
    /// Path to the linux device node
    pub path: String,
    /// Serial number (autodetected)
    pub serial: String,
    /// Vendor (autodetected)
    pub vendor: String,
    /// Model (autodetected)
    pub model: String,
    /// Device major number
    pub major: u32,
    /// Device minor number
    pub minor: u32,
}

@ -1,349 +0,0 @@

//! Types for tape drive API
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};

use crate::{OptionalDeviceIdentification, CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};

pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const LTO_DRIVE_PATH_SCHEMA: Schema =
    StringSchema::new("The path to a LTO SCSI-generic tape device (e.g. '/dev/sg0')").schema();

pub const CHANGER_DRIVENUM_SCHEMA: Schema =
    IntegerSchema::new("Associated changer drive number (requires option changer)")
        .minimum(0)
        .maximum(255)
        .default(0)
        .schema();

#[api(
    properties: {
        name: {
            schema: DRIVE_NAME_SCHEMA,
        }
    }
)]
#[derive(Serialize, Deserialize)]
/// Simulate tape drives (only for test and debug)
#[serde(rename_all = "kebab-case")]
pub struct VirtualTapeDrive {
    pub name: String,
    /// Path to directory
    pub path: String,
    /// Virtual tape size
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_size: Option<usize>,
}

#[api(
    properties: {
        name: {
            schema: DRIVE_NAME_SCHEMA,
        },
        path: {
            schema: LTO_DRIVE_PATH_SCHEMA,
        },
        changer: {
            schema: CHANGER_NAME_SCHEMA,
            optional: true,
        },
        "changer-drivenum": {
            schema: CHANGER_DRIVENUM_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// LTO SCSI tape drive
pub struct LtoTapeDrive {
    #[updater(skip)]
    pub name: String,
    pub path: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub changer: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub changer_drivenum: Option<u64>,
}

#[api(
    properties: {
        config: {
            type: LtoTapeDrive,
        },
        info: {
            type: OptionalDeviceIdentification,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive list entry
pub struct DriveListEntry {
    #[serde(flatten)]
    pub config: LtoTapeDrive,
    #[serde(flatten)]
    pub info: OptionalDeviceIdentification,
    /// The state of the drive if locked
    #[serde(skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
    /// Current device activity
    #[serde(skip_serializing_if = "Option::is_none")]
    pub activity: Option<DeviceActivity>,
}

#[api()]
#[derive(Serialize, Deserialize)]
/// Medium auxiliary memory attributes (MAM)
pub struct MamAttribute {
    /// Attribute id
    pub id: u16,
    /// Attribute name
    pub name: String,
    /// Attribute value
    pub value: String,
}

#[api()]
#[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)]
/// Tape density
pub enum TapeDensity {
    /// Unknown (no media loaded)
    Unknown,
    /// LTO1
    LTO1,
    /// LTO2
    LTO2,
    /// LTO3
    LTO3,
    /// LTO4
    LTO4,
    /// LTO5
    LTO5,
    /// LTO6
    LTO6,
    /// LTO7
    LTO7,
    /// LTO7M8
    LTO7M8,
    /// LTO8
    LTO8,
    /// LTO9
    LTO9,
}

impl TryFrom<u8> for TapeDensity {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        let density = match value {
            0x00 => TapeDensity::Unknown,
            0x40 => TapeDensity::LTO1,
            0x42 => TapeDensity::LTO2,
            0x44 => TapeDensity::LTO3,
            0x46 => TapeDensity::LTO4,
            0x58 => TapeDensity::LTO5,
            0x5a => TapeDensity::LTO6,
            0x5c => TapeDensity::LTO7,
            0x5d => TapeDensity::LTO7M8,
            0x5e => TapeDensity::LTO8,
            0x60 => TapeDensity::LTO9,
            _ => bail!("unknown tape density code 0x{:02x}", value),
        };
        Ok(density)
    }
}
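
// The codes above are the SCSI density codes reported by the drive. A short
// sketch of the conversion, including the PartialOrd ordering that the enum
// declaration order provides:
#[cfg(test)]
mod tape_density_tests {
    use super::*;

    #[test]
    fn density_from_scsi_code() {
        let density = TapeDensity::try_from(0x58u8).unwrap();
        assert_eq!(density, TapeDensity::LTO5);
        // newer generations compare greater than older ones
        assert!(TapeDensity::LTO9 > TapeDensity::LTO5);
        // unassigned codes are rejected
        assert!(TapeDensity::try_from(0xffu8).is_err());
    }
}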

#[api(
    properties: {
        density: {
            type: TapeDensity,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive/Media status for LTO SCSI drives.
///
/// Media related data is optional - only set if there is a medium
/// loaded.
pub struct LtoDriveAndMediaStatus {
    /// Vendor
    pub vendor: String,
    /// Product
    pub product: String,
    /// Revision
    pub revision: String,
    /// Block size (0 is variable size)
    pub blocksize: u32,
    /// Compression enabled
    pub compression: bool,
    /// Drive buffer mode
    pub buffer_mode: u8,
    /// Tape density
    pub density: TapeDensity,
    /// Media is write protected
    #[serde(skip_serializing_if = "Option::is_none")]
    pub write_protect: Option<bool>,
    /// Tape Alert Flags
    #[serde(skip_serializing_if = "Option::is_none")]
    pub alert_flags: Option<String>,
    /// Current file number
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_number: Option<u64>,
    /// Current block number
    #[serde(skip_serializing_if = "Option::is_none")]
    pub block_number: Option<u64>,
    /// Medium Manufacture Date (epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub manufactured: Option<i64>,
    /// Total Bytes Read in Medium Life
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bytes_read: Option<u64>,
    /// Total Bytes Written in Medium Life
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bytes_written: Option<u64>,
    /// Number of mounts for the current volume (i.e., Thread Count)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub volume_mounts: Option<u64>,
    /// Count of the total number of times the medium has passed over
    /// the head.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub medium_passes: Option<u64>,
    /// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub medium_wearout: Option<f64>,
    /// Current device activity
    #[serde(skip_serializing_if = "Option::is_none")]
    pub drive_activity: Option<DeviceActivity>,
}

#[api()]
/// Volume statistics from SCSI log page 17h
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Lp17VolumeStatistics {
    /// Volume mounts (thread count)
    pub volume_mounts: u64,
    /// Total data sets written
    pub volume_datasets_written: u64,
    /// Write retries
    pub volume_recovered_write_data_errors: u64,
    /// Total unrecovered write errors
    pub volume_unrecovered_write_data_errors: u64,
    /// Total suspended writes
    pub volume_write_servo_errors: u64,
    /// Total fatal suspended writes
    pub volume_unrecovered_write_servo_errors: u64,
    /// Total datasets read
    pub volume_datasets_read: u64,
    /// Total read retries
    pub volume_recovered_read_errors: u64,
    /// Total unrecovered read errors
    pub volume_unrecovered_read_errors: u64,
    /// Last mount unrecovered write errors
    pub last_mount_unrecovered_write_errors: u64,
    /// Last mount unrecovered read errors
    pub last_mount_unrecovered_read_errors: u64,
    /// Last mount bytes written
    pub last_mount_bytes_written: u64,
    /// Last mount bytes read
    pub last_mount_bytes_read: u64,
    /// Lifetime bytes written
    pub lifetime_bytes_written: u64,
    /// Lifetime bytes read
    pub lifetime_bytes_read: u64,
    /// Last load write compression ratio
    pub last_load_write_compression_ratio: u64,
    /// Last load read compression ratio
    pub last_load_read_compression_ratio: u64,
    /// Medium mount time
    pub medium_mount_time: u64,
    /// Medium ready time
    pub medium_ready_time: u64,
    /// Total native capacity
    pub total_native_capacity: u64,
    /// Total used native capacity
    pub total_used_native_capacity: u64,
    /// Write protect
    pub write_protect: bool,
    /// Volume is WORM
    pub worm: bool,
    /// Beginning of medium passes
    pub beginning_of_medium_passes: u64,
    /// Middle of tape passes
    pub middle_of_tape_passes: u64,
    /// Volume serial number
    pub serial: String,
}

/// The DT Device Activity from DT Device Status LP page
#[api]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum DeviceActivity {
    /// No activity
    NoActivity,
    /// Cleaning
    Cleaning,
    /// Loading
    Loading,
    /// Unloading
    Unloading,
    /// Other unspecified activity
    Other,
    /// Reading
    Reading,
    /// Writing
    Writing,
    /// Locating
    Locating,
    /// Rewinding
    Rewinding,
    /// Erasing
    Erasing,
    /// Formatting
    Formatting,
    /// Calibrating
    Calibrating,
    /// Other (DT)
    OtherDT,
    /// Updating microcode
    MicrocodeUpdate,
    /// Reading encrypted data
    ReadingEncrypted,
    /// Writing encrypted data
    WritingEncrypted,
}

impl TryFrom<u8> for DeviceActivity {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        Ok(match value {
            0x00 => DeviceActivity::NoActivity,
            0x01 => DeviceActivity::Cleaning,
            0x02 => DeviceActivity::Loading,
            0x03 => DeviceActivity::Unloading,
            0x04 => DeviceActivity::Other,
            0x05 => DeviceActivity::Reading,
            0x06 => DeviceActivity::Writing,
            0x07 => DeviceActivity::Locating,
            0x08 => DeviceActivity::Rewinding,
            0x09 => DeviceActivity::Erasing,
            0x0A => DeviceActivity::Formatting,
            0x0B => DeviceActivity::Calibrating,
            0x0C => DeviceActivity::OtherDT,
            0x0D => DeviceActivity::MicrocodeUpdate,
            0x0E => DeviceActivity::ReadingEncrypted,
            0x0F => DeviceActivity::WritingEncrypted,
            other => bail!("invalid DT device activity value: {:x}", other),
        })
    }
}
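
// The activity codes map 1:1 onto the values from the DT Device Status log
// page. A minimal sketch of decoding a raw status byte:
#[cfg(test)]
mod device_activity_tests {
    use super::*;

    #[test]
    fn activity_from_status_byte() {
        assert!(matches!(
            DeviceActivity::try_from(0x06u8),
            Ok(DeviceActivity::Writing)
        ));
        // reserved values (0x10 and up) are rejected
        assert!(DeviceActivity::try_from(0x10u8).is_err());
    }
}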

@ -1,179 +0,0 @@

use ::serde::{Deserialize, Serialize};

use proxmox_schema::*;
use proxmox_uuid::Uuid;

use crate::{MediaLocation, MediaStatus, UUID_FORMAT};

pub const MEDIA_SET_UUID_SCHEMA: Schema = StringSchema::new(
    "MediaSet Uuid (We use the all-zero Uuid to reserve an empty media for a specific pool).",
)
.format(&UUID_FORMAT)
.schema();

pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.")
    .format(&UUID_FORMAT)
    .schema();

#[api(
    properties: {
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media Set list entry
pub struct MediaSetListEntry {
    /// Media set name
    pub media_set_name: String,
    pub media_set_uuid: Uuid,
    /// MediaSet creation time stamp
    pub media_set_ctime: i64,
    /// Media Pool
    pub pool: String,
}

#[api(
    properties: {
        location: {
            type: MediaLocation,
        },
        status: {
            type: MediaStatus,
        },
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media list entry
pub struct MediaListEntry {
    /// Media label text (or Barcode)
    pub label_text: String,
    pub uuid: Uuid,
    /// Creation time stamp
    pub ctime: i64,
    pub location: MediaLocation,
    pub status: MediaStatus,
    /// Expired flag
    pub expired: bool,
    /// Catalog status OK
    pub catalog: bool,
    /// Media set name
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_uuid: Option<Uuid>,
    /// Media set seq_nr
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seq_nr: Option<u64>,
    /// MediaSet creation time stamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_ctime: Option<i64>,
    /// Media Pool
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pool: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Bytes currently used
    pub bytes_used: Option<u64>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media label info
pub struct MediaIdFlat {
    /// Unique ID
    pub uuid: Uuid,
    /// Media label text (or Barcode)
    pub label_text: String,
    /// Creation time stamp
    pub ctime: i64,
    // All MediaSet properties are optional here
    /// MediaSet Pool
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pool: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_uuid: Option<Uuid>,
    /// MediaSet media sequence number
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seq_nr: Option<u64>,
    /// MediaSet Creation time stamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_ctime: Option<i64>,
    /// Encryption key fingerprint
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encryption_key_fingerprint: Option<String>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Label with optional Uuid
pub struct LabelUuidMap {
    /// Changer label text (or Barcode)
    pub label_text: String,
    /// Associated Uuid (if any)
    pub uuid: Option<Uuid>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Media content list entry
pub struct MediaContentEntry {
    /// Media label text (or Barcode)
    pub label_text: String,
    /// Media Uuid
    pub uuid: Uuid,
    /// Media set name
    pub media_set_name: String,
    /// Media set uuid
    pub media_set_uuid: Uuid,
    /// MediaSet Creation time stamp
    pub media_set_ctime: i64,
    /// Media set seq_nr
    pub seq_nr: u64,
    /// Media Pool
    pub pool: String,
    /// Datastore Name
    pub store: String,
    /// Backup snapshot
    pub snapshot: String,
    /// Snapshot creation time (epoch)
    pub backup_time: i64,
}

@ -1,80 +0,0 @@

use anyhow::{bail, Error};

use proxmox_schema::{ApiStringFormat, Schema, StringSchema};

use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};

pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

#[derive(Debug, PartialEq, Eq, Clone)]
/// Media location
pub enum MediaLocation {
    /// Ready for use (inside tape library)
    Online(String),
    /// Locally available, but needs to be mounted (insert into tape
    /// drive)
    Offline,
    /// Media is inside a Vault
    Vault(String),
}

proxmox_serde::forward_deserialize_to_from_str!(MediaLocation);
proxmox_serde::forward_serialize_to_display!(MediaLocation);

impl proxmox_schema::ApiType for MediaLocation {
    const API_SCHEMA: Schema = StringSchema::new(
        "Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')",
    )
    .format(&ApiStringFormat::VerifyFn(|text| {
        let location: MediaLocation = text.parse()?;
        match location {
            MediaLocation::Online(ref changer) => {
                CHANGER_NAME_SCHEMA.parse_simple_value(changer)?;
            }
            MediaLocation::Vault(ref vault) => {
                VAULT_NAME_SCHEMA.parse_simple_value(vault)?;
            }
            MediaLocation::Offline => { /* OK */ }
        }
        Ok(())
    }))
    .schema();
}

impl std::fmt::Display for MediaLocation {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            MediaLocation::Offline => {
                write!(f, "offline")
            }
            MediaLocation::Online(changer) => {
                write!(f, "online-{}", changer)
            }
            MediaLocation::Vault(vault) => {
                write!(f, "vault-{}", vault)
            }
        }
    }
}

impl std::str::FromStr for MediaLocation {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "offline" {
            return Ok(MediaLocation::Offline);
        }
        if let Some(changer) = s.strip_prefix("online-") {
            return Ok(MediaLocation::Online(changer.to_string()));
        }
        if let Some(vault) = s.strip_prefix("vault-") {
            return Ok(MediaLocation::Vault(vault.to_string()));
        }

        bail!("MediaLocation parse error");
    }
}
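
// Display and FromStr are intentionally inverse, since (de)serialization is
// forwarded to them above. A round-trip sketch:
#[cfg(test)]
mod media_location_tests {
    use super::*;

    #[test]
    fn location_string_roundtrip() {
        let loc: MediaLocation = "online-changer0".parse().unwrap();
        assert_eq!(loc, MediaLocation::Online("changer0".to_string()));
        assert_eq!(loc.to_string(), "online-changer0");
        // unknown prefixes are rejected
        assert!("somewhere-else".parse::<MediaLocation>().is_err());
    }
}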

@ -1,161 +0,0 @@

//! Types for tape media pool API
//!
//! Note: Both MediaSetPolicy and RetentionPolicy are complex enums,
//! so we cannot use them directly for the API. Instead, we represent
//! them as String.

use std::str::FromStr;

use anyhow::Error;
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, ApiStringFormat, Schema, StringSchema, Updater};

use proxmox_time::{CalendarEvent, TimeSpan};

use crate::{
    PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
    TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
};

pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new(
    "Media set naming template (may contain strftime() time format specifications).",
)
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();

pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
    MediaSetPolicy::from_str(s)?;
    Ok(())
});

pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema =
    StringSchema::new("Media set allocation policy ('continue', 'always', or a calendar event).")
        .format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT)
        .schema();

/// Media set allocation policy
pub enum MediaSetPolicy {
    /// Try to use the current media set
    ContinueCurrent,
    /// Each backup job creates a new media set
    AlwaysCreate,
    /// Create a new set when the specified CalendarEvent triggers
    CreateAt(CalendarEvent),
}

impl std::str::FromStr for MediaSetPolicy {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "continue" {
            return Ok(MediaSetPolicy::ContinueCurrent);
        }
        if s == "always" {
            return Ok(MediaSetPolicy::AlwaysCreate);
        }

        let event = s.parse()?;

        Ok(MediaSetPolicy::CreateAt(event))
    }
}

pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
    RetentionPolicy::from_str(s)?;
    Ok(())
});

pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema =
    StringSchema::new("Media retention policy ('overwrite', 'keep', or time span).")
        .format(&MEDIA_RETENTION_POLICY_FORMAT)
        .schema();

/// Media retention policy
pub enum RetentionPolicy {
    /// Always overwrite media
    OverwriteAlways,
    /// Protect data for the timespan specified
    ProtectFor(TimeSpan),
    /// Never overwrite data
    KeepForever,
}

impl std::str::FromStr for RetentionPolicy {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "overwrite" {
            return Ok(RetentionPolicy::OverwriteAlways);
        }
        if s == "keep" {
            return Ok(RetentionPolicy::KeepForever);
        }

        let time_span = s.parse()?;

        Ok(RetentionPolicy::ProtectFor(time_span))
    }
}
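
// 'continue'/'always' and 'overwrite'/'keep' are matched literally; anything
// else is handed to the CalendarEvent/TimeSpan parser. A parsing sketch:
#[cfg(test)]
mod media_pool_policy_tests {
    use super::*;

    #[test]
    fn policy_parsing() {
        assert!(matches!(
            "always".parse::<MediaSetPolicy>(),
            Ok(MediaSetPolicy::AlwaysCreate)
        ));
        assert!(matches!(
            "overwrite".parse::<RetentionPolicy>(),
            Ok(RetentionPolicy::OverwriteAlways)
        ));
        assert!(matches!(
            "keep".parse::<RetentionPolicy>(),
            Ok(RetentionPolicy::KeepForever)
        ));
    }
}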

#[api(
    properties: {
        name: {
            schema: MEDIA_POOL_NAME_SCHEMA,
        },
        allocation: {
            schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
            optional: true,
        },
        retention: {
            schema: MEDIA_RETENTION_POLICY_SCHEMA,
            optional: true,
        },
        template: {
            schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
            optional: true,
        },
        encrypt: {
            schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
/// Media pool configuration
pub struct MediaPoolConfig {
    /// The pool name
    #[updater(skip)]
    pub name: String,
    /// Media Set allocation policy
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allocation: Option<String>,
    /// Media retention policy
    #[serde(skip_serializing_if = "Option::is_none")]
    pub retention: Option<String>,
    /// Media set naming template (default "%c")
    ///
    /// The template is UTF8 text, and can include strftime time
    /// format specifications.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub template: Option<String>,
    /// Encryption key fingerprint
    ///
    /// If set, encrypt all data using the specified key.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypt: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

@ -1,21 +0,0 @@

use serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api()]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Media Status
pub enum MediaStatus {
    /// Media is ready to be written
    Writable,
    /// Media is full (contains data)
    Full,
    /// Media is marked as unknown, needs rescan
    Unknown,
    /// Media is marked as damaged
    Damaged,
    /// Media is marked as retired
    Retired,
}

@ -1,92 +0,0 @@

//! Types for tape backup API

mod device;
pub use device::*;

mod changer;
pub use changer::*;

mod drive;
pub use drive::*;

mod media_pool;
pub use media_pool::*;

mod media_status;
pub use media_status::*;

mod media_location;

pub use media_location::*;

mod media;
pub use media::*;

use const_format::concatcp;
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
use proxmox_uuid::Uuid;

use crate::{
    BackupType, BACKUP_ID_SCHEMA, BACKUP_NS_PATH_RE, FINGERPRINT_SHA256_FORMAT,
    PROXMOX_SAFE_ID_REGEX_STR, SNAPSHOT_PATH_REGEX_STR,
};

const_regex! {
    pub TAPE_RESTORE_SNAPSHOT_REGEX = concatcp!(r"^", PROXMOX_SAFE_ID_REGEX_STR, r":(?:", BACKUP_NS_PATH_RE, ")?", SNAPSHOT_PATH_REGEX_STR, r"$");
}

pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);

pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema =
    StringSchema::new("Tape encryption key fingerprint (sha256).")
        .format(&FINGERPRINT_SHA256_FORMAT)
        .schema();

pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
    StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time'")
        .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
        .type_text("store:[ns/namespace/...]type/id/time")
        .schema();

#[api(
    properties: {
        pool: {
            schema: MEDIA_POOL_NAME_SCHEMA,
            optional: true,
        },
        "label-text": {
            schema: MEDIA_LABEL_SCHEMA,
            optional: true,
        },
        "media": {
            schema: MEDIA_UUID_SCHEMA,
            optional: true,
        },
        "media-set": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
        "backup-type": {
            type: BackupType,
            optional: true,
        },
        "backup-id": {
            schema: BACKUP_ID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Content list filter parameters
pub struct MediaContentListFilter {
    pub pool: Option<String>,
    pub label_text: Option<String>,
    pub media: Option<Uuid>,
    pub media_set: Option<Uuid>,
    pub backup_type: Option<BackupType>,
    pub backup_id: Option<String>,
}

@ -1,141 +0,0 @@

use serde::{Deserialize, Serialize};

use proxmox_human_byte::HumanByte;
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};

use crate::{
    CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};

pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
    StringSchema::new("Timeframe to specify when the rule is active.")
        .format(&DAILY_DURATION_FORMAT)
        .schema();

pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema =
    IntegerSchema::new("Rate limit (for Token bucket filter) in bytes/second.")
        .minimum(100_000)
        .schema();

pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema =
    IntegerSchema::new("Size of the token bucket (for Token bucket filter) in bytes.")
        .minimum(1000)
        .schema();

#[api(
    properties: {
        "rate-in": {
            type: HumanByte,
            optional: true,
        },
        "burst-in": {
            type: HumanByte,
            optional: true,
        },
        "rate-out": {
            type: HumanByte,
            optional: true,
        },
        "burst-out": {
            type: HumanByte,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Rate Limit Configuration
pub struct RateLimitConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rate_in: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub burst_in: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rate_out: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub burst_out: Option<HumanByte>,
}

impl RateLimitConfig {
    pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
        Self {
            rate_in: rate,
            burst_in: burst,
            rate_out: rate,
            burst_out: burst,
        }
    }
}
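
// with_same_inout() simply mirrors one rate/burst pair in both directions,
// which works because HumanByte is Copy. A usage sketch; the string parse is
// an assumption (HumanByte appears to implement FromStr, given its use in
// property strings elsewhere):
#[cfg(test)]
mod rate_limit_tests {
    use super::*;

    #[test]
    fn symmetric_limit() {
        // assumed FromStr impl on HumanByte
        let rate: HumanByte = "100 MiB".parse().unwrap();
        let limit = RateLimitConfig::with_same_inout(Some(rate), None);
        assert!(limit.rate_in == limit.rate_out);
        assert!(limit.burst_in.is_none());
    }
}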
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: {
|
||||
schema: TRAFFIC_CONTROL_ID_SCHEMA,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
limit: {
|
||||
type: RateLimitConfig,
|
||||
},
|
||||
network: {
|
||||
type: Array,
|
||||
items: {
|
||||
schema: CIDR_SCHEMA,
|
||||
},
|
||||
},
|
||||
timeframe: {
|
||||
type: Array,
|
||||
items: {
|
||||
schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
|
||||
},
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Clone, Serialize, Deserialize, PartialEq, Updater)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Traffic control rule
|
||||
pub struct TrafficControlRule {
|
||||
#[updater(skip)]
|
||||
pub name: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
/// Rule applies to Source IPs within this networks
|
||||
pub network: Vec<String>,
|
||||
#[serde(flatten)]
|
||||
pub limit: RateLimitConfig,
|
||||
// fixme: expose this?
|
||||
// /// Bandwidth is shared across all connections
|
||||
// #[serde(skip_serializing_if="Option::is_none")]
|
||||
// pub shared: Option<bool>,
|
||||
/// Enable the rule at specific times
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub timeframe: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
config: {
|
||||
type: TrafficControlRule,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Traffic control rule config with current rates
|
||||
pub struct TrafficControlCurrentRate {
|
||||
#[serde(flatten)]
|
||||
pub config: TrafficControlRule,
|
||||
/// Current ingress rate in bytes/second
|
||||
pub cur_rate_in: u64,
|
||||
/// Current egress rate in bytes/second
|
||||
pub cur_rate_out: u64,
|
||||
}
@ -1,226 +0,0 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater};

use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};

pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
    "Enable the account (default). You can set this to '0' to disable the account.",
)
.default(true)
.schema();

pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
    "Account expiration date (seconds since epoch). '0' means no expiration date.",
)
.default(0)
.minimum(0)
.schema();

pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

#[api(
    properties: {
        userid: {
            type: Userid,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
        firstname: {
            optional: true,
            schema: FIRST_NAME_SCHEMA,
        },
        lastname: {
            schema: LAST_NAME_SCHEMA,
            optional: true,
        },
        email: {
            schema: EMAIL_SCHEMA,
            optional: true,
        },
        tokens: {
            type: Array,
            optional: true,
            description: "List of user's API tokens.",
            items: {
                type: ApiToken
            },
        },
        "totp-locked": {
            type: bool,
            optional: true,
            default: false,
            description: "True if the user is currently locked out of TOTP factors",
        },
        "tfa-locked-until": {
            optional: true,
            description: "Contains a timestamp until when a user is locked out of 2nd factors",
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
    pub userid: Userid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub firstname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lastname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub tokens: Vec<ApiToken>,
    #[serde(skip_serializing_if = "bool_is_false", default)]
    pub totp_locked: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tfa_locked_until: Option<i64>,
}

fn bool_is_false(b: &bool) -> bool {
    !b
}
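
// Serde helper used by `UserWithTokens` above: `totp-locked` defaults to
// false and is skipped on serialization unless a lockout is actually in
// effect, keeping API responses minimal.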

#[api(
    properties: {
        tokenid: {
            schema: PROXMOX_TOKEN_ID_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ApiToken properties.
pub struct ApiToken {
    pub tokenid: Authid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
}

impl ApiToken {
    pub fn is_active(&self) -> bool {
        if !self.enable.unwrap_or(true) {
            return false;
        }
        if let Some(expire) = self.expire {
            let now = proxmox_time::epoch_i64();
            if expire > 0 && expire <= now {
                return false;
            }
        }
        true
    }
}
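
// Note: tokens are active by default; `expire == 0` (or an unset expiry)
// means "never expires", so only a positive timestamp in the past
// deactivates a token.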

#[api(
    properties: {
        userid: {
            type: Userid,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
        firstname: {
            optional: true,
            schema: FIRST_NAME_SCHEMA,
        },
        lastname: {
            schema: LAST_NAME_SCHEMA,
            optional: true,
        },
        email: {
            schema: EMAIL_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, PartialEq, Eq)]
/// User properties.
pub struct User {
    #[updater(skip)]
    pub userid: Userid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub firstname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lastname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
}

impl User {
    pub fn is_active(&self) -> bool {
        if !self.enable.unwrap_or(true) {
            return false;
        }
        if let Some(expire) = self.expire {
            let now = proxmox_time::epoch_i64();
            if expire > 0 && expire <= now {
                return false;
            }
        }
        true
    }
}

@ -1,78 +0,0 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::*;

const_regex! {
    pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
}

pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new("Pool sector size exponent.")
    .minimum(9)
    .maximum(16)
    .default(12)
    .schema();
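
// For reference: ashift is a power-of-two exponent, so the default of 12
// corresponds to 4 KiB (2^12 byte) sectors; the allowed range 9..=16 spans
// 512 B to 64 KiB.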

pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
    .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
    .schema();

#[api(default: "On")]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS compression algorithm to use.
pub enum ZfsCompressionType {
    /// Gnu Zip
    Gzip,
    /// LZ4
    Lz4,
    /// LZJB
    Lzjb,
    /// ZLE
    Zle,
    /// ZStd
    ZStd,
    /// Enable compression using the default algorithm.
    On,
    /// Disable compression.
    Off,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS RAID level to use.
pub enum ZfsRaidLevel {
    /// Single Disk
    Single,
    /// Mirror
    Mirror,
    /// Raid10
    Raid10,
    /// RaidZ
    RaidZ,
    /// RaidZ2
    RaidZ2,
    /// RaidZ3
    RaidZ3,
}

#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// zpool list item
pub struct ZpoolListItem {
    /// zpool name
    pub name: String,
    /// Health
    pub health: String,
    /// Total size
    pub size: u64,
    /// Used size
    pub alloc: u64,
    /// Free space
    pub free: u64,
    /// ZFS fragmentation level
    pub frag: u64,
    /// ZFS deduplication ratio
    pub dedup: f64,
}

@ -1,76 +0,0 @@
use pbs_api_types::{BackupGroup, BackupType, GroupFilter};
use std::str::FromStr;

#[test]
fn test_no_filters() {
    let group_filters = vec![];

    let do_backup = [
        "vm/101", "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", "vm/109",
    ];

    for id in do_backup {
        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
}

#[test]
fn test_include_filters() {
    let group_filters = vec![GroupFilter::from_str("regex:.*10[2-8]").unwrap()];

    let do_backup = [
        "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108",
    ];

    let dont_backup = ["vm/101", "vm/109"];

    for id in do_backup {
        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }

    for id in dont_backup {
        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
}

#[test]
fn test_exclude_filters() {
    let group_filters = [
        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
    ];

    let do_backup = ["vm/104", "vm/108", "vm/109"];

    let dont_backup = ["vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107"];

    for id in do_backup {
        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
    for id in dont_backup {
        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
}

#[test]
fn test_include_and_exclude_filters() {
    let group_filters = [
        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
        GroupFilter::from_str("regex:.*10[2-8]").unwrap(),
        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
    ];

    let do_backup = ["vm/104", "vm/108"];

    let dont_backup = [
        "vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107", "vm/109",
    ];

    for id in do_backup {
        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }

    for id in dont_backup {
        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
}
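
// Taken together, these tests pin down the filter semantics: with no filters
// every group matches; otherwise a group is backed up when it matches at
// least one include filter (if any are given) and is not matched by any
// exclude filter.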

@ -98,6 +98,8 @@ pub const PROXMOX_BACKUP_KERNEL_FN: &str =

pub const PROXMOX_BACKUP_SUBSCRIPTION_FN: &str = configdir!("/subscription");

pub const APT_PKG_STATE_FN: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/pkg-state.json");

/// Prepend configuration directory to a file name
///
/// This is a simple way to get the full path for configuration files.

@ -12,11 +12,8 @@ bytes.workspace = true
futures.workspace = true
h2.workspace = true
hex.workspace = true
http.workspace = true
hyper.workspace = true
lazy_static.workspace = true
libc.workspace = true
log.workspace = true
nix.workspace = true
openssl.workspace = true
percent-encoding.workspace = true

@ -39,6 +36,7 @@ proxmox-compression.workspace = true
proxmox-http = { workspace = true, features = [ "rate-limiter" ] }
proxmox-human-byte.workspace = true
proxmox-io = { workspace = true, features = [ "tokio" ] }
proxmox-log = { workspace = true }
proxmox-router = { workspace = true, features = [ "cli", "server" ] }
proxmox-schema.workspace = true
proxmox-sys.workspace = true

@ -47,6 +45,5 @@ proxmox-time.workspace = true
pxar.workspace = true

pbs-api-types.workspace = true
pbs-buildcfg.workspace = true
pbs-datastore.workspace = true
pbs-tools.workspace = true
@ -1,19 +1,17 @@
use anyhow::{format_err, Error};
use std::fs::File;
use std::io::{Seek, SeekFrom, Write};
use std::os::unix::fs::OpenOptionsExt;
use std::sync::Arc;

use futures::future::AbortHandle;
use serde_json::{json, Value};

use pbs_api_types::{BackupDir, BackupNamespace};
use pbs_api_types::{BackupArchiveName, BackupDir, BackupNamespace, MANIFEST_BLOB_NAME};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
use pbs_datastore::{BackupManifest, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::sha::sha256;

@ -128,7 +126,8 @@ impl BackupReader {
    /// The manifest signature is verified if we have a crypt_config.
    pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
        let mut raw_data = Vec::with_capacity(64 * 1024);
        self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
        self.download(MANIFEST_BLOB_NAME.as_ref(), &mut raw_data)
            .await?;
        let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
        // no expected digest available
        let data = blob.decode(None, None)?;

@ -141,20 +140,16 @@ impl BackupReader {

    /// Download a .blob file
    ///
    /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
    /// the provided manifest.
    /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
    /// details). The data is verified using the provided manifest.
    pub async fn download_blob(
        &self,
        manifest: &BackupManifest,
        name: &str,
        name: &BackupArchiveName,
    ) -> Result<DataBlobReader<'_, File>, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        self.download(name, &mut tmpfile).await?;
        self.download(name.as_ref(), &mut tmpfile).await?;

        tmpfile.seek(SeekFrom::Start(0))?;
        let (csum, size) = sha256(&mut tmpfile)?;

@ -167,20 +162,16 @@ impl BackupReader {

    /// Download dynamic index file
    ///
    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
    /// the provided manifest.
    /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
    /// details). The index is verified using the provided manifest.
    pub async fn download_dynamic_index(
        &self,
        manifest: &BackupManifest,
        name: &str,
        name: &BackupArchiveName,
    ) -> Result<DynamicIndexReader, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        self.download(name, &mut tmpfile).await?;
        self.download(name.as_ref(), &mut tmpfile).await?;

        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;

@ -194,20 +185,16 @@ impl BackupReader {

    /// Download fixed index file
    ///
    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
    /// the provided manifest.
    /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
    /// details). The index is verified using the provided manifest.
    pub async fn download_fixed_index(
        &self,
        manifest: &BackupManifest,
        name: &str,
        name: &BackupArchiveName,
    ) -> Result<FixedIndexReader, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        self.download(name, &mut tmpfile).await?;
        self.download(name.as_ref(), &mut tmpfile).await?;

        let index = FixedIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;

@ -7,10 +7,12 @@ const_regex! {
    BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
}

pub const BACKUP_SOURCE_SCHEMA: Schema =
    StringSchema::new("Backup source specification ([<label>:<path>]).")
        .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
        .schema();
pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
    "Backup source specification ([<label>:<path>]), the specification \
    'label' must contain alphanumerics, hyphens and underscores only.",
)
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
.schema();

pub enum BackupSpecificationType {
    PXAR,

@ -35,7 +37,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
            "img" => BackupSpecificationType::IMAGE,
            "conf" => BackupSpecificationType::CONFIG,
            "log" => BackupSpecificationType::LOGFILE,
            _ => bail!("unknown backup source type '{}'", extension),
            _ => bail!("unknown backup source type '{extension}'"),
        };
        return Ok(BackupSpecification {
            archive_name,

@ -44,7 +46,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
        });
    }

    bail!("unable to parse backup source specification '{}'", value);
    bail!("unable to parse backup source specification '{value}'");
}
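
// e.g. "root.pxar:/" is parsed as a PXAR source with archive label
// "root.pxar" and source path "/".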

#[api]

pbs-client/src/backup_stats.rs (new file, 119 lines)
@ -0,0 +1,119 @@
//! Implements counters to generate statistics for log output during uploads with the backup writer

use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;

use crate::pxar::create::ReusableDynamicEntry;

/// Basic backup run statistics and archive checksum
pub struct BackupStats {
    pub size: u64,
    pub csum: [u8; 32],
    pub duration: Duration,
    pub chunk_count: u64,
}

/// Extended backup run statistics and archive checksum
pub(crate) struct UploadStats {
    pub(crate) chunk_count: usize,
    pub(crate) chunk_reused: usize,
    pub(crate) chunk_injected: usize,
    pub(crate) size: usize,
    pub(crate) size_reused: usize,
    pub(crate) size_injected: usize,
    pub(crate) size_compressed: usize,
    pub(crate) duration: Duration,
    pub(crate) csum: [u8; 32],
}

impl UploadStats {
    /// Convert the upload stats to the more concise [`BackupStats`]
    #[inline(always)]
    pub(crate) fn to_backup_stats(&self) -> BackupStats {
        BackupStats {
            chunk_count: self.chunk_count as u64,
            size: self.size as u64,
            duration: self.duration,
            csum: self.csum,
        }
    }
}

/// Atomic counters for accounting upload stream progress information
#[derive(Clone)]
pub(crate) struct UploadCounters {
    injected_chunk_count: Arc<AtomicUsize>,
    known_chunk_count: Arc<AtomicUsize>,
    total_chunk_count: Arc<AtomicUsize>,
    compressed_stream_len: Arc<AtomicU64>,
    injected_stream_len: Arc<AtomicUsize>,
    reused_stream_len: Arc<AtomicUsize>,
    total_stream_len: Arc<AtomicUsize>,
}

impl UploadCounters {
    /// Create and zero init new upload counters
    pub(crate) fn new() -> Self {
        Self {
            total_chunk_count: Arc::new(AtomicUsize::new(0)),
            injected_chunk_count: Arc::new(AtomicUsize::new(0)),
            known_chunk_count: Arc::new(AtomicUsize::new(0)),
            compressed_stream_len: Arc::new(AtomicU64::new(0)),
            injected_stream_len: Arc::new(AtomicUsize::new(0)),
            reused_stream_len: Arc::new(AtomicUsize::new(0)),
            total_stream_len: Arc::new(AtomicUsize::new(0)),
        }
    }

    #[inline(always)]
    pub(crate) fn add_known_chunk(&mut self, chunk_len: usize) -> usize {
        self.known_chunk_count.fetch_add(1, Ordering::SeqCst);
        self.total_chunk_count.fetch_add(1, Ordering::SeqCst);
        self.reused_stream_len
            .fetch_add(chunk_len, Ordering::SeqCst);
        self.total_stream_len.fetch_add(chunk_len, Ordering::SeqCst)
    }

    #[inline(always)]
    pub(crate) fn add_new_chunk(&mut self, chunk_len: usize, chunk_raw_size: u64) -> usize {
        self.total_chunk_count.fetch_add(1, Ordering::SeqCst);
        self.compressed_stream_len
            .fetch_add(chunk_raw_size, Ordering::SeqCst);
        self.total_stream_len.fetch_add(chunk_len, Ordering::SeqCst)
    }

    #[inline(always)]
    pub(crate) fn add_injected_chunk(&mut self, chunk: &ReusableDynamicEntry) -> usize {
        self.total_chunk_count.fetch_add(1, Ordering::SeqCst);
        self.injected_chunk_count.fetch_add(1, Ordering::SeqCst);

        self.reused_stream_len
            .fetch_add(chunk.size() as usize, Ordering::SeqCst);
        self.injected_stream_len
            .fetch_add(chunk.size() as usize, Ordering::SeqCst);
        self.total_stream_len
            .fetch_add(chunk.size() as usize, Ordering::SeqCst)
    }

    #[inline(always)]
    pub(crate) fn total_stream_len(&self) -> usize {
        self.total_stream_len.load(Ordering::SeqCst)
    }

    /// Convert the counters to [`UploadStats`], including given archive checksum and runtime.
    #[inline(always)]
    pub(crate) fn to_upload_stats(&self, csum: [u8; 32], duration: Duration) -> UploadStats {
        UploadStats {
            chunk_count: self.total_chunk_count.load(Ordering::SeqCst),
            chunk_reused: self.known_chunk_count.load(Ordering::SeqCst),
            chunk_injected: self.injected_chunk_count.load(Ordering::SeqCst),
            size: self.total_stream_len.load(Ordering::SeqCst),
            size_reused: self.reused_stream_len.load(Ordering::SeqCst),
            size_injected: self.injected_stream_len.load(Ordering::SeqCst),
            size_compressed: self.compressed_stream_len.load(Ordering::SeqCst) as usize,
            duration,
            csum,
        }
    }
}
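
A minimal sketch of how these counters behave; note that the add_* helpers
return the total stream length before the addition, i.e. the offset at which
the accounted chunk starts (values here are illustrative only):

    let mut counters = UploadCounters::new();
    // 4 KiB chunk that compressed down to 1 KiB on the wire
    let offset = counters.add_new_chunk(4096, 1024);
    assert_eq!(offset, 0);
    assert_eq!(counters.total_stream_len(), 4096);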

@ -1,28 +1,34 @@
use std::collections::HashSet;
use std::future::Future;
use std::os::unix::fs::OpenOptionsExt;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Instant;

use anyhow::{bail, format_err, Error};
use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
use futures::stream::{Stream, StreamExt, TryStreamExt};
use openssl::sha::Sha256;
use serde_json::{json, Value};
use tokio::io::AsyncReadExt;
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;

use pbs_api_types::{BackupDir, BackupNamespace};
use pbs_api_types::{
    ArchiveType, BackupArchiveName, BackupDir, BackupNamespace, CATALOG_NAME, MANIFEST_BLOB_NAME,
};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_datastore::manifest::BackupManifest;
use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
use pbs_tools::crypt_config::CryptConfig;

use proxmox_human_byte::HumanByte;
use proxmox_log::{debug, enabled, info, trace, warn, Level};
use proxmox_time::TimeSpan;

use super::backup_stats::{BackupStats, UploadCounters, UploadStats};
use super::inject_reused_chunks::{InjectChunks, InjectReusedChunks, InjectedChunksInfo};
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};

@ -40,11 +46,6 @@ impl Drop for BackupWriter {
    }
}

pub struct BackupStats {
    pub size: u64,
    pub csum: [u8; 32],
}

/// Options for uploading blobs/streams to the server
#[derive(Default, Clone)]
pub struct UploadOptions {

@ -54,19 +55,12 @@ pub struct UploadOptions {
    pub fixed_size: Option<u64>,
}

struct UploadStats {
    chunk_count: usize,
    chunk_reused: usize,
    chunk_injected: usize,
struct ChunkUploadResponse {
    future: h2::legacy::client::ResponseFuture,
    size: usize,
    size_reused: usize,
    size_injected: usize,
    size_compressed: usize,
    duration: std::time::Duration,
    csum: [u8; 32],
}
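
// The inline BackupStats/UploadStats definitions move to backup_stats.rs; in
// their place, ChunkUploadResponse pairs each pending h2 response with the
// chunk size so that completion handlers can account uploaded bytes.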

type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<ChunkUploadResponse>)>;
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;

impl BackupWriter {

@ -149,7 +143,7 @@ impl BackupWriter {
        param: Option<Value>,
        content_type: &str,
        data: Vec<u8>,
    ) -> Result<h2::client::ResponseFuture, Error> {
    ) -> Result<h2::legacy::client::ResponseFuture, Error> {
        let request =
            H2Client::request_builder("localhost", method, path, param, Some(content_type))
                .unwrap();

@ -189,6 +183,7 @@ impl BackupWriter {
        mut reader: R,
        file_name: &str,
    ) -> Result<BackupStats, Error> {
        let start_time = Instant::now();
        let mut raw_data = Vec::new();
        // fixme: avoid loading into memory
        reader.read_to_end(&mut raw_data)?;

@ -206,7 +201,12 @@ impl BackupWriter {
            raw_data,
        )
        .await?;
        Ok(BackupStats { size, csum })
        Ok(BackupStats {
            size,
            csum,
            duration: start_time.elapsed(),
            chunk_count: 0,
        })
    }

    pub async fn upload_blob_from_data(

@ -215,6 +215,7 @@ impl BackupWriter {
        file_name: &str,
        options: UploadOptions,
    ) -> Result<BackupStats, Error> {
        let start_time = Instant::now();
        let blob = match (options.encrypt, &self.crypt_config) {
            (false, _) => DataBlob::encode(&data, None, options.compress)?,
            (true, None) => bail!("requested encryption without a crypt config"),

@ -238,7 +239,12 @@ impl BackupWriter {
            raw_data,
        )
        .await?;
        Ok(BackupStats { size, csum })
        Ok(BackupStats {
            size,
            csum,
            duration: start_time.elapsed(),
            chunk_count: 0,
        })
    }

    pub async fn upload_blob_from_file<P: AsRef<std::path::Path>>(

@ -263,9 +269,102 @@ impl BackupWriter {
        .await
    }

    /// Upload chunks and index
    pub async fn upload_index_chunk_info(
        &self,
        archive_name: &BackupArchiveName,
        stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
        options: UploadOptions,
    ) -> Result<BackupStats, Error> {
        let mut param = json!({ "archive-name": archive_name });
        let prefix = if let Some(size) = options.fixed_size {
            param["size"] = size.into();
            "fixed"
        } else {
            "dynamic"
        };

        if options.encrypt && self.crypt_config.is_none() {
            bail!("requested encryption without a crypt config");
        }

        let wid = self
            .h2
            .post(&format!("{prefix}_index"), Some(param))
            .await?
            .as_u64()
            .unwrap();

        let mut counters = UploadCounters::new();
        let counters_readonly = counters.clone();

        let is_fixed_chunk_size = prefix == "fixed";

        let index_csum = Arc::new(Mutex::new(Some(Sha256::new())));
        let index_csum_2 = index_csum.clone();

        let stream = stream
            .and_then(move |mut merged_chunk_info| {
                match merged_chunk_info {
                    MergedChunkInfo::New(ref chunk_info) => {
                        let chunk_len = chunk_info.chunk_len;
                        let offset =
                            counters.add_new_chunk(chunk_len as usize, chunk_info.chunk.raw_size());
                        let end_offset = offset as u64 + chunk_len;
                        let mut guard = index_csum.lock().unwrap();
                        let csum = guard.as_mut().unwrap();
                        if !is_fixed_chunk_size {
                            csum.update(&end_offset.to_le_bytes());
                        }
                        csum.update(&chunk_info.digest);
                    }
                    MergedChunkInfo::Known(ref mut known_chunk_list) => {
                        for (chunk_len, digest) in known_chunk_list {
                            let offset = counters.add_known_chunk(*chunk_len as usize);
                            let end_offset = offset as u64 + *chunk_len;
                            let mut guard = index_csum.lock().unwrap();
                            let csum = guard.as_mut().unwrap();
                            if !is_fixed_chunk_size {
                                csum.update(&end_offset.to_le_bytes());
                            }
                            csum.update(digest);
                            // Replace size with offset, expected by further stream
                            *chunk_len = offset as u64;
                        }
                    }
                }
                future::ok(merged_chunk_info)
            })
            .merge_known_chunks();

        let upload_stats = Self::upload_merged_chunk_stream(
            self.h2.clone(),
            wid,
            archive_name,
            prefix,
            stream,
            index_csum_2,
            counters_readonly,
        )
        .await?;

        let param = json!({
            "wid": wid,
            "chunk-count": upload_stats.chunk_count,
            "size": upload_stats.size,
            "csum": hex::encode(upload_stats.csum),
        });
        let _value = self
            .h2
            .post(&format!("{prefix}_close"), Some(param))
            .await?;

        Ok(upload_stats.to_backup_stats())
    }
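
    // Note the checksum asymmetry above: for dynamic indexes the running
    // SHA-256 covers each chunk's end offset as well as its digest, while for
    // fixed indexes only the digests are hashed, since chunk boundaries are
    // implicit there.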

    pub async fn upload_stream(
        &self,
        archive_name: &str,
        archive_name: &BackupArchiveName,
        stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
        options: UploadOptions,
        injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,

@ -291,13 +390,13 @@ impl BackupWriter {
            if !manifest
                .files()
                .iter()
                .any(|file| file.filename == archive_name)
                .any(|file| file.filename == archive_name.as_ref())
            {
                log::info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
                info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
            } else {
                // try, but ignore errors
                match ArchiveType::from_path(archive_name) {
                    Ok(ArchiveType::FixedIndex) => {
                match archive_name.archive_type() {
                    ArchiveType::FixedIndex => {
                        if let Err(err) = self
                            .download_previous_fixed_index(
                                archive_name,

@ -306,10 +405,10 @@ impl BackupWriter {
                            )
                            .await
                        {
                            log::warn!("Error downloading .fidx from previous manifest: {}", err);
                            warn!("Error downloading .fidx from previous manifest: {}", err);
                        }
                    }
                    Ok(ArchiveType::DynamicIndex) => {
                    ArchiveType::DynamicIndex => {
                        if let Err(err) = self
                            .download_previous_dynamic_index(
                                archive_name,

@ -318,7 +417,7 @@ impl BackupWriter {
                            )
                            .await
                        {
                            log::warn!("Error downloading .didx from previous manifest: {}", err);
                            warn!("Error downloading .didx from previous manifest: {}", err);
                        }
                    }
                    _ => { /* do nothing */ }

@ -346,61 +445,58 @@ impl BackupWriter {
            },
            options.compress,
            injections,
            archive_name,
        )
        .await?;

        let size_dirty = upload_stats.size - upload_stats.size_reused;
        let size: HumanByte = upload_stats.size.into();
        let archive = if log::log_enabled!(log::Level::Debug) {
            archive_name
        let archive = if enabled!(Level::DEBUG) {
            archive_name.to_string()
        } else {
            pbs_tools::format::strip_server_file_extension(archive_name)
            archive_name.without_type_extension()
        };

        if upload_stats.chunk_injected > 0 {
            log::info!(
            info!(
                "{archive}: reused {} from previous snapshot for unchanged files ({} chunks)",
                HumanByte::from(upload_stats.size_injected),
                upload_stats.chunk_injected,
            );
        }

        if archive_name != CATALOG_NAME {
        if *archive_name != *CATALOG_NAME {
            let speed: HumanByte =
                ((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
            let size_dirty: HumanByte = size_dirty.into();
            let size_compressed: HumanByte = upload_stats.size_compressed.into();
            log::info!(
            info!(
                "{archive}: had to backup {size_dirty} of {size} (compressed {size_compressed}) in {:.2} s (average {speed}/s)",
                upload_stats.duration.as_secs_f64()
            );
        } else {
            log::info!("Uploaded backup catalog ({})", size);
            info!("Uploaded backup catalog ({})", size);
        }

        if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
            let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
            let reused: HumanByte = upload_stats.size_reused.into();
            log::info!(
            info!(
                "{}: backup was done incrementally, reused {} ({:.1}%)",
                archive,
                reused,
                reused_percent
                archive, reused, reused_percent
            );
        }
        if log::log_enabled!(log::Level::Debug) && upload_stats.chunk_count > 0 {
            log::debug!(
        if enabled!(Level::DEBUG) && upload_stats.chunk_count > 0 {
            debug!(
                "{}: Reused {} from {} chunks.",
                archive,
                upload_stats.chunk_reused,
                upload_stats.chunk_count
                archive, upload_stats.chunk_reused, upload_stats.chunk_count
            );
            log::debug!(
            debug!(
                "{}: Average chunk size was {}.",
                archive,
                HumanByte::from(upload_stats.size / upload_stats.chunk_count)
            );
            log::debug!(
            debug!(
                "{}: Average time per request: {} microseconds.",
                archive,
                (upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)

@ -414,14 +510,11 @@ impl BackupWriter {
            "csum": hex::encode(upload_stats.csum),
        });
        let _value = self.h2.post(&close_path, Some(param)).await?;
        Ok(BackupStats {
            size: upload_stats.size as u64,
            csum: upload_stats.csum,
        })
        Ok(upload_stats.to_backup_stats())
    }

    fn response_queue() -> (
        mpsc::Sender<h2::client::ResponseFuture>,
        mpsc::Sender<h2::legacy::client::ResponseFuture>,
        oneshot::Receiver<Result<(), Error>>,
    ) {
        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);

@ -444,11 +537,11 @@ impl BackupWriter {
        tokio::spawn(
            ReceiverStream::new(verify_queue_rx)
                .map(Ok::<_, Error>)
                .try_for_each(move |response: h2::client::ResponseFuture| {
                .try_for_each(move |response: h2::legacy::client::ResponseFuture| {
                    response
                        .map_err(Error::from)
                        .and_then(H2Client::h2api_response)
                        .map_ok(move |result| log::debug!("RESPONSE: {:?}", result))
                        .map_ok(move |result| debug!("RESPONSE: {:?}", result))
                        .map_err(|err| format_err!("pipelined request failed: {}", err))
                })
                .map(|result| {

@ -463,6 +556,7 @@ impl BackupWriter {
        h2: H2Client,
        wid: u64,
        path: String,
        uploaded: Arc<AtomicUsize>,
    ) -> (UploadQueueSender, UploadResultReceiver) {
        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
        let (verify_result_tx, verify_result_rx) = oneshot::channel();

@ -471,15 +565,21 @@ impl BackupWriter {
        tokio::spawn(
            ReceiverStream::new(verify_queue_rx)
                .map(Ok::<_, Error>)
                .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
                .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<ChunkUploadResponse>)| {
                    match (response, merged_chunk_info) {
                        (Some(response), MergedChunkInfo::Known(list)) => {
                            Either::Left(
                                response
                                    .future
                                    .map_err(Error::from)
                                    .and_then(H2Client::h2api_response)
                                    .and_then(move |_result| {
                                        future::ok(MergedChunkInfo::Known(list))
                                    .and_then({
                                        let uploaded = uploaded.clone();
                                        move |_result| {
                                            // account for uploaded bytes for progress output
                                            uploaded.fetch_add(response.size, Ordering::SeqCst);
                                            future::ok(MergedChunkInfo::Known(list))
                                        }
                                    })
                            )
                        }

@ -499,7 +599,7 @@ impl BackupWriter {
                            digest_list.push(hex::encode(digest));
                            offset_list.push(offset);
                        }
                        log::debug!("append chunks list len ({})", digest_list.len());
                        debug!("append chunks list len ({})", digest_list.len());
                        let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
                        let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
                        let param_data = bytes::Bytes::from(param.to_string().into_bytes());

@ -527,15 +627,11 @@ impl BackupWriter {

    pub async fn download_previous_fixed_index(
        &self,
        archive_name: &str,
        archive_name: &BackupArchiveName,
        manifest: &BackupManifest,
        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
    ) -> Result<FixedIndexReader, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        let param = json!({ "archive-name": archive_name });
        self.h2

@ -555,7 +651,7 @@ impl BackupWriter {
            known_chunks.insert(*index.index_digest(i).unwrap());
        }

        log::debug!(
        debug!(
            "{}: known chunks list length is {}",
            archive_name,
            index.index_count()

@ -566,15 +662,11 @@ impl BackupWriter {

    pub async fn download_previous_dynamic_index(
        &self,
        archive_name: &str,
        archive_name: &BackupArchiveName,
        manifest: &BackupManifest,
        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
    ) -> Result<DynamicIndexReader, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        let param = json!({ "archive-name": archive_name });
        self.h2

@ -593,7 +685,7 @@ impl BackupWriter {
            known_chunks.insert(*index.index_digest(i).unwrap());
        }

        log::debug!(
        debug!(
            "{}: known chunks list length is {}",
            archive_name,
            index.index_count()

@ -617,7 +709,7 @@ impl BackupWriter {
    pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
        let mut raw_data = Vec::with_capacity(64 * 1024);

        let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
        let param = json!({ "archive-name": MANIFEST_BLOB_NAME.to_string() });
        self.h2
            .download("previous", Some(param), &mut raw_data)
            .await?;

@ -645,52 +737,26 @@ impl BackupWriter {
        crypt_config: Option<Arc<CryptConfig>>,
        compress: bool,
        injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
        archive: &BackupArchiveName,
    ) -> impl Future<Output = Result<UploadStats, Error>> {
        let total_chunks = Arc::new(AtomicUsize::new(0));
        let total_chunks2 = total_chunks.clone();
        let known_chunk_count = Arc::new(AtomicUsize::new(0));
        let known_chunk_count2 = known_chunk_count.clone();
        let injected_chunk_count = Arc::new(AtomicUsize::new(0));
        let injected_chunk_count2 = injected_chunk_count.clone();
        let mut counters = UploadCounters::new();
        let counters_readonly = counters.clone();

        let stream_len = Arc::new(AtomicUsize::new(0));
        let stream_len2 = stream_len.clone();
        let compressed_stream_len = Arc::new(AtomicU64::new(0));
        let compressed_stream_len2 = compressed_stream_len.clone();
        let reused_len = Arc::new(AtomicUsize::new(0));
        let reused_len2 = reused_len.clone();
        let injected_len = Arc::new(AtomicUsize::new(0));
        let injected_len2 = injected_len.clone();

        let append_chunk_path = format!("{}_index", prefix);
        let upload_chunk_path = format!("{}_chunk", prefix);
        let is_fixed_chunk_size = prefix == "fixed";

        let (upload_queue, upload_result) =
            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path);

        let start_time = std::time::Instant::now();

        let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
        let index_csum_2 = index_csum.clone();

        stream
            .inject_reused_chunks(injections, stream_len.clone())
        let stream = stream
            .inject_reused_chunks(injections, counters.clone())
            .and_then(move |chunk_info| match chunk_info {
                InjectedChunksInfo::Known(chunks) => {
                    // account for injected chunks
                    let count = chunks.len();
                    total_chunks.fetch_add(count, Ordering::SeqCst);
                    injected_chunk_count.fetch_add(count, Ordering::SeqCst);

                    let mut known = Vec::new();
                    let mut guard = index_csum.lock().unwrap();
                    let csum = guard.as_mut().unwrap();
                    for chunk in chunks {
                        let offset =
                            stream_len.fetch_add(chunk.size() as usize, Ordering::SeqCst) as u64;
                        reused_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
                        injected_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
                        let offset = counters.add_injected_chunk(&chunk) as u64;
                        let digest = chunk.digest();
                        known.push((offset, digest));
                        let end_offset = offset + chunk.size();

@ -703,9 +769,6 @@ impl BackupWriter {
                    // account for not injected chunks (new and known)
                    let chunk_len = data.len();

                    total_chunks.fetch_add(1, Ordering::SeqCst);
                    let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;

                    let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);

                    if let Some(ref crypt_config) = crypt_config {

@ -713,7 +776,29 @@ impl BackupWriter {
                    }

                    let mut known_chunks = known_chunks.lock().unwrap();
                    let digest = chunk_builder.digest();
                    let digest = *chunk_builder.digest();
                    let (offset, res) = if known_chunks.contains(&digest) {
                        let offset = counters.add_known_chunk(chunk_len) as u64;
                        (offset, MergedChunkInfo::Known(vec![(offset, digest)]))
                    } else {
                        match chunk_builder.build() {
                            Ok((chunk, digest)) => {
                                let offset =
                                    counters.add_new_chunk(chunk_len, chunk.raw_size()) as u64;
                                known_chunks.insert(digest);
                                (
                                    offset,
                                    MergedChunkInfo::New(ChunkInfo {
                                        chunk,
                                        digest,
                                        chunk_len: chunk_len as u64,
                                        offset,
                                    }),
                                )
                            }
                            Err(err) => return future::err(err),
                        }
                    };

                    let mut guard = index_csum.lock().unwrap();
                    let csum = guard.as_mut().unwrap();

@ -723,29 +808,63 @@ impl BackupWriter {
                    if !is_fixed_chunk_size {
                        csum.update(&chunk_end.to_le_bytes());
                    }
                    csum.update(digest);
                    csum.update(&digest);

                    let chunk_is_known = known_chunks.contains(digest);
                    if chunk_is_known {
                        known_chunk_count.fetch_add(1, Ordering::SeqCst);
                        reused_len.fetch_add(chunk_len, Ordering::SeqCst);
                        future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
                    } else {
                        let compressed_stream_len2 = compressed_stream_len.clone();
                        known_chunks.insert(*digest);
                        future::ready(chunk_builder.build().map(move |(chunk, digest)| {
                            compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
                            MergedChunkInfo::New(ChunkInfo {
                                chunk,
                                digest,
                                chunk_len: chunk_len as u64,
                                offset,
                            })
                        }))
                    }
                    future::ok(res)
                }
            })
            .merge_known_chunks()
            .merge_known_chunks();

        Self::upload_merged_chunk_stream(
            h2,
            wid,
            archive,
            prefix,
            stream,
            index_csum_2,
            counters_readonly,
        )
    }

    fn upload_merged_chunk_stream(
        h2: H2Client,
        wid: u64,
        archive: &BackupArchiveName,
        prefix: &str,
        stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
        index_csum: Arc<Mutex<Option<Sha256>>>,
        counters: UploadCounters,
    ) -> impl Future<Output = Result<UploadStats, Error>> {
        let append_chunk_path = format!("{prefix}_index");
        let upload_chunk_path = format!("{prefix}_chunk");

        let start_time = std::time::Instant::now();
        let uploaded_len = Arc::new(AtomicUsize::new(0));

        let (upload_queue, upload_result) =
            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, uploaded_len.clone());

        let progress_handle = if archive.ends_with(".img.fidx")
            || archive.ends_with(".pxar.didx")
            || archive.ends_with(".ppxar.didx")
        {
            let counters = counters.clone();
            Some(tokio::spawn(async move {
                loop {
                    tokio::time::sleep(tokio::time::Duration::from_secs(60)).await;

                    let size = HumanByte::from(counters.total_stream_len());
                    let size_uploaded = HumanByte::from(uploaded_len.load(Ordering::SeqCst));
                    let elapsed = TimeSpan::from(start_time.elapsed());

                    info!("processed {size} in {elapsed}, uploaded {size_uploaded}");
                }
            }))
        } else {
            None
        };
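
        // Progress reporting: for payload archives (.img.fidx, .pxar.didx,
        // .ppxar.didx) a detached task logs processed vs. uploaded totals once
        // a minute; it is aborted further below once the merged chunk stream
        // completes.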

        stream
            .try_for_each(move |merged_chunk_info| {
                let upload_queue = upload_queue.clone();

@ -754,7 +873,7 @@ impl BackupWriter {
                    let digest = chunk_info.digest;
                    let digest_str = hex::encode(digest);

                    log::trace!(
                    trace!(
                        "upload new chunk {} ({} bytes, offset {})",
                        digest_str,
                        chunk_info.chunk_len,

@ -785,7 +904,13 @@ impl BackupWriter {
                    Either::Left(h2.send_request(request, upload_data).and_then(
                        move |response| async move {
                            upload_queue
                                .send((new_info, Some(response)))
                                .send((
                                    new_info,
                                    Some(ChunkUploadResponse {
                                        future: response,
                                        size: chunk_info.chunk_len as usize,
                                    }),
                                ))
                                .await
                                .map_err(|err| {
                                    format_err!("failed to send to upload queue: {}", err)

@ -803,29 +928,14 @@ impl BackupWriter {
            })
            .then(move |result| async move { upload_result.await?.and(result) }.boxed())
            .and_then(move |_| {
                let duration = start_time.elapsed();
                let chunk_count = total_chunks2.load(Ordering::SeqCst);
                let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
                let chunk_injected = injected_chunk_count2.load(Ordering::SeqCst);
                let size = stream_len2.load(Ordering::SeqCst);
                let size_reused = reused_len2.load(Ordering::SeqCst);
                let size_injected = injected_len2.load(Ordering::SeqCst);
                let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;

                let mut guard = index_csum_2.lock().unwrap();
                let mut guard = index_csum.lock().unwrap();
                let csum = guard.take().unwrap().finish();

                futures::future::ok(UploadStats {
                    chunk_count,
                    chunk_reused,
                    chunk_injected,
                    size,
                    size_reused,
                    size_injected,
                    size_compressed,
                    duration,
                    csum,
                })
                if let Some(handle) = progress_handle {
                    handle.abort();
                }

                futures::future::ok(counters.to_upload_stats(csum, start_time.elapsed()))
            })
    }

@ -854,7 +964,7 @@ impl BackupWriter {
                break;
            }

            log::debug!("send test data ({} bytes)", data.len());
            debug!("send test data ({} bytes)", data.len());
            let request =
                H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
            let request_future = self

@ -869,13 +979,13 @@ impl BackupWriter {

        let _ = upload_result.await?;

        log::info!(
        info!(
            "Uploaded {} chunks in {} seconds.",
            repeat,
            start_time.elapsed().as_secs()
        );
        let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
        log::info!(
        info!(
            "Time per request: {} microseconds.",
            (start_time.elapsed().as_micros()) / (repeat as u128)
        );

@ -14,6 +14,7 @@ use nix::fcntl::OFlag;
use nix::sys::stat::Mode;

use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
use pbs_api_types::PathPattern;
use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
use proxmox_schema::api;
use proxmox_sys::fs::{create_path, CreateOptions};

@ -21,7 +22,8 @@ use pxar::accessor::ReadAt;
use pxar::{EntryKind, Metadata};

use pbs_datastore::catalog::{self, DirEntryAttribute};
use proxmox_async::runtime::block_in_place;
use proxmox_async::runtime::{block_in_place, block_on};
use proxmox_log::error;

use crate::pxar::Flags;

@ -105,7 +107,7 @@ fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<Strin
    match shell.complete_path(complete_me) {
        Ok(list) => list,
        Err(err) => {
            log::error!("error during completion: {}", err);
            error!("error during completion: {}", err);
            Vec::new()
        }
    }

@ -240,8 +242,7 @@ async fn list_selected_command(patterns: bool) -> Result<(), Error> {
    input: {
        properties: {
            pattern: {
                type: String,
                description: "Match pattern for matching files in the catalog."
                type: PathPattern,
            },
            select: {
                type: bool,

@ -282,9 +283,8 @@ async fn restore_selected_command(target: String) -> Result<(), Error> {
                description: "target path for restore on local filesystem."
            },
            pattern: {
                type: String,
                type: PathPattern,
                optional: true,
                description: "match pattern to limit files for restore."
            }
        }
    }

@ -304,7 +304,6 @@ async fn restore_command(target: String, pattern: Option<String>) -> Result<(),
/// The `Path` type's component iterator does not tell us anything about trailing slashes or
/// trailing `Component::CurDir` entries. Since we only support regular paths we'll roll our own
/// here:

pub struct Shell {
    /// Readline instance handling input and callbacks
    rl: rustyline::Editor<CliHelper>,

@ -312,8 +311,9 @@ pub struct Shell {
    /// Interactive prompt.
    prompt: String,

    /// Catalog reader instance to navigate
    catalog: CatalogReader,
    /// Optional catalog reader instance to navigate, if not present the Accessor is used for
    /// navigation
    catalog: Option<CatalogReader>,

    /// List of selected paths for restore
    selected: HashMap<OsString, MatchEntry>,

@ -347,7 +347,7 @@ impl PathStackEntry {
impl Shell {
    /// Create a new shell for the given catalog and pxar archive.
    pub async fn new(
        mut catalog: CatalogReader,
        mut catalog: Option<CatalogReader>,
        archive_name: &str,
        archive: Accessor,
    ) -> Result<Self, Error> {

@ -355,11 +355,31 @@ impl Shell {
        let mut rl = rustyline::Editor::<CliHelper>::new();
        rl.set_helper(Some(cli_helper));

        let catalog_root = catalog.root()?;
        let archive_root = catalog
            .lookup(&catalog_root, archive_name.as_bytes())?
            .ok_or_else(|| format_err!("archive not found in catalog"))?;
        let position = vec![PathStackEntry::new(archive_root)];
        let mut position = Vec::new();
        if let Some(catalog) = catalog.as_mut() {
            let catalog_root = catalog.root()?;
            let archive_root = catalog
                .lookup(&catalog_root, archive_name.as_bytes())?
                .ok_or_else(|| format_err!("archive not found in catalog"))?;
            position.push(PathStackEntry::new(archive_root));
        } else {
            let root = archive.open_root().await?;
            let root_entry = root.lookup_self().await?;
            if let EntryKind::Directory = root_entry.kind() {
                let entry_attr = DirEntryAttribute::Directory {
                    start: root_entry.entry_range_info().entry_range.start,
                };
                position.push(PathStackEntry {
                    catalog: catalog::DirEntry {
                        name: archive_name.into(),
                        attr: entry_attr,
                    },
                    pxar: Some(root_entry),
                });
            } else {
                bail!("unexpected root entry type");
            }
        }
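
        // When no catalog is supplied, the shell now bootstraps its position
        // stack directly from the pxar archive's root directory entry instead
        // of the catalog root.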
|
||||
|
||||
let mut this = Self {
|
||||
rl,
|
||||
@ -398,7 +418,7 @@ impl Shell {
|
||||
let args = match cli::shellword_split(&line) {
|
||||
Ok(args) => args,
|
||||
Err(err) => {
|
||||
log::error!("Error: {}", err);
|
||||
error!("Error: {}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@ -450,7 +470,7 @@ impl Shell {
|
||||
|
||||
async fn resolve_symlink(
|
||||
stack: &mut Vec<PathStackEntry>,
|
||||
catalog: &mut CatalogReader,
|
||||
catalog: &mut Option<CatalogReader>,
|
||||
accessor: &Accessor,
|
||||
follow_symlinks: &mut Option<usize>,
|
||||
) -> Result<(), Error> {
|
||||
@ -468,7 +488,7 @@ impl Shell {
|
||||
};
|
||||
|
||||
let new_stack =
|
||||
Self::lookup(stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
|
||||
Self::lookup(stack, catalog, accessor, Some(path), follow_symlinks).await?;
|
||||
|
||||
*stack = new_stack;
|
||||
|
||||
@ -484,7 +504,7 @@ impl Shell {
|
||||
/// out.
|
||||
async fn step(
|
||||
stack: &mut Vec<PathStackEntry>,
|
||||
catalog: &mut CatalogReader,
|
||||
catalog: &mut Option<CatalogReader>,
|
||||
accessor: &Accessor,
|
||||
component: std::path::Component<'_>,
|
||||
follow_symlinks: &mut Option<usize>,
|
||||
@ -503,9 +523,27 @@ impl Shell {
|
||||
if stack.last().unwrap().catalog.is_symlink() {
|
||||
Self::resolve_symlink(stack, catalog, accessor, follow_symlinks).await?;
|
||||
}
|
||||
match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
|
||||
Some(dir) => stack.push(PathStackEntry::new(dir)),
|
||||
None => bail!("no such file or directory: {:?}", entry),
|
||||
if let Some(catalog) = catalog {
|
||||
match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
|
||||
Some(dir) => stack.push(PathStackEntry::new(dir)),
|
||||
None => bail!("no such file or directory: {entry:?}"),
|
||||
}
|
||||
} else {
|
||||
let pxar_entry = parent_pxar_entry(stack)?;
|
||||
let parent_dir = pxar_entry.enter_directory().await?;
|
||||
match parent_dir.lookup(entry).await? {
|
||||
Some(entry) => {
|
||||
let entry_attr = DirEntryAttribute::try_from(&entry)?;
|
||||
stack.push(PathStackEntry {
|
||||
catalog: catalog::DirEntry {
|
||||
name: entry.entry().file_name().as_bytes().into(),
|
||||
attr: entry_attr,
|
||||
},
|
||||
pxar: Some(entry),
|
||||
})
|
||||
}
|
||||
None => bail!("no such file or directory: {entry:?}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -515,7 +553,7 @@ impl Shell {
|
||||
|
||||
fn step_nofollow(
|
||||
stack: &mut Vec<PathStackEntry>,
|
||||
catalog: &mut CatalogReader,
|
||||
catalog: &mut Option<CatalogReader>,
|
||||
component: std::path::Component<'_>,
|
||||
) -> Result<(), Error> {
|
||||
use std::path::Component;
|
||||
@ -531,11 +569,27 @@ impl Shell {
|
||||
Component::Normal(entry) => {
|
||||
if stack.last().unwrap().catalog.is_symlink() {
|
||||
bail!("target is a symlink");
|
||||
} else {
|
||||
} else if let Some(catalog) = catalog.as_mut() {
|
||||
match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
|
||||
Some(dir) => stack.push(PathStackEntry::new(dir)),
|
||||
None => bail!("no such file or directory: {:?}", entry),
|
||||
}
|
||||
} else {
|
||||
let pxar_entry = parent_pxar_entry(stack)?;
|
||||
let parent_dir = block_on(pxar_entry.enter_directory())?;
|
||||
match block_on(parent_dir.lookup(entry))? {
|
||||
Some(entry) => {
|
||||
let entry_attr = DirEntryAttribute::try_from(&entry)?;
|
||||
stack.push(PathStackEntry {
|
||||
catalog: catalog::DirEntry {
|
||||
name: entry.entry().file_name().as_bytes().into(),
|
||||
attr: entry_attr,
|
||||
},
|
||||
pxar: Some(entry),
|
||||
})
|
||||
}
|
||||
None => bail!("no such file or directory: {entry:?}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -545,7 +599,7 @@ impl Shell {
/// The pxar accessor is required to resolve symbolic links
async fn walk_catalog(
stack: &mut Vec<PathStackEntry>,
catalog: &mut CatalogReader,
catalog: &mut Option<CatalogReader>,
accessor: &Accessor,
path: &Path,
follow_symlinks: &mut Option<usize>,
@@ -559,7 +613,7 @@ impl Shell {
/// Non-async version cannot follow symlinks.
fn walk_catalog_nofollow(
stack: &mut Vec<PathStackEntry>,
catalog: &mut CatalogReader,
catalog: &mut Option<CatalogReader>,
path: &Path,
) -> Result<(), Error> {
for c in path.components() {
@@ -612,12 +666,34 @@ impl Shell {
tmp_stack = self.position.clone();
}
Self::walk_catalog_nofollow(&mut tmp_stack, &mut self.catalog, &path)?;
(&tmp_stack.last().unwrap().catalog, base, part)
(&tmp_stack.last().unwrap(), base, part)
}
None => (&self.position.last().unwrap().catalog, "", input),
None => (&self.position.last().unwrap(), "", input),
};

let entries = self.catalog.read_dir(parent)?;
let entries = if let Some(catalog) = self.catalog.as_mut() {
catalog.read_dir(&parent.catalog)?
} else {
let dir = if let Some(entry) = parent.pxar.as_ref() {
block_on(entry.enter_directory())?
} else {
bail!("missing pxar entry for parent");
};
let mut out = Vec::new();
let entries = block_on(crate::pxar::tools::pxar_metadata_read_dir(dir))?;
for entry in entries {
let mut name = base.to_string();
let file_name = entry.file_name().as_bytes();
if file_name.starts_with(part.as_bytes()) {
name.push_str(std::str::from_utf8(file_name)?);
if entry.is_dir() {
name.push('/');
}
out.push(name);
}
}
return Ok(out);
};

let mut out = Vec::new();
for entry in entries {
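The tab-completion hook is a synchronous callback, so the async pxar accessor calls above are driven to completion inline with a block_on helper. Which executor backs that helper is not visible in this hunk; futures::executor::block_on stands in below for illustration:

    use futures::executor::block_on;

    // Hypothetical async lookup standing in for the pxar accessor calls.
    async fn lookup_name() -> Option<String> {
        Some("file.txt".to_string())
    }

    // A synchronous completion hook driving the async call to completion.
    fn complete() -> Vec<String> {
        match block_on(lookup_name()) {
            Some(name) => vec![name],
            None => Vec::new(),
        }
    }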
@@ -637,7 +713,7 @@ impl Shell {
// Break async recursion here: lookup -> walk_catalog -> step -> lookup
fn lookup<'future, 's, 'c, 'a, 'p, 'y>(
stack: &'s [PathStackEntry],
catalog: &'c mut CatalogReader,
catalog: &'c mut Option<CatalogReader>,
accessor: &'a Accessor,
path: Option<&'p Path>,
follow_symlinks: &'y mut Option<usize>,
@@ -678,7 +754,23 @@ impl Shell {

let last = stack.last().unwrap();
if last.catalog.is_directory() {
let items = self.catalog.read_dir(&stack.last().unwrap().catalog)?;
let items = if let Some(catalog) = self.catalog.as_mut() {
catalog.read_dir(&stack.last().unwrap().catalog)?
} else {
let dir = if let Some(entry) = last.pxar.as_ref() {
entry.enter_directory().await?
} else {
bail!("missing pxar entry for parent");
};

let mut out = std::io::stdout();
let items = crate::pxar::tools::pxar_metadata_read_dir(dir).await?;
for item in items {
out.write_all(item.file_name().as_bytes())?;
out.write_all(b"\n")?;
}
return Ok(());
};
let mut out = std::io::stdout();
// FIXME: columnize
for item in items {
@@ -705,7 +797,7 @@ impl Shell {

let file = Self::walk_pxar_archive(&self.accessor, &mut stack).await?;
std::io::stdout()
.write_all(crate::pxar::format_multi_line_entry(file.entry()).as_bytes())?;
.write_all(crate::pxar::tools::format_multi_line_entry(file.entry()).as_bytes())?;
Ok(())
}

@@ -720,6 +812,14 @@ impl Shell {
&mut None,
)
.await?;

if new_position.is_empty() {
// Avoid moving below archive root into catalog root, thereby treating
// the archive root as its own parent directory.
self.position.truncate(1);
return Ok(());
}

if !new_position.last().unwrap().catalog.is_directory() {
bail!("not a directory");
}
@@ -820,17 +920,36 @@ impl Shell {
async fn list_matching_files(&mut self) -> Result<(), Error> {
let matches = self.build_match_list();

self.catalog.find(
&self.position[0].catalog,
&mut Vec::new(),
&matches,
&mut |path: &[u8]| -> Result<(), Error> {
let mut out = std::io::stdout();
out.write_all(path)?;
out.write_all(b"\n")?;
Ok(())
},
)?;
if let Some(catalog) = self.catalog.as_mut() {
catalog.find(
&self.position[0].catalog,
&mut Vec::new(),
&matches,
&mut |path: &[u8]| -> Result<(), Error> {
let mut out = std::io::stdout();
out.write_all(path)?;
out.write_all(b"\n")?;
Ok(())
},
)?;
} else {
let parent_dir = if let Some(pxar_entry) = self.position[0].pxar.as_ref() {
pxar_entry.enter_directory().await?
} else {
bail!("missing pxar entry for archive root");
};
crate::pxar::tools::pxar_metadata_catalog_find(
parent_dir,
&matches,
&|path: &[u8]| -> Result<(), Error> {
let mut out = std::io::stdout();
out.write_all(path)?;
out.write_all(b"\n")?;
Ok(())
},
)
.await?;
}

Ok(())
}
@@ -841,18 +960,37 @@ impl Shell {
MatchEntry::parse_pattern(pattern, PatternFlag::PATH_NAME, MatchType::Include)?;

let mut found_some = false;
self.catalog.find(
&self.position[0].catalog,
&mut Vec::new(),
&[&pattern_entry],
&mut |path: &[u8]| -> Result<(), Error> {
found_some = true;
let mut out = std::io::stdout();
out.write_all(path)?;
out.write_all(b"\n")?;
Ok(())
},
)?;
if let Some(catalog) = self.catalog.as_mut() {
catalog.find(
&self.position[0].catalog,
&mut Vec::new(),
&[&pattern_entry],
&mut |path: &[u8]| -> Result<(), Error> {
found_some = true;
let mut out = std::io::stdout();
out.write_all(path)?;
out.write_all(b"\n")?;
Ok(())
},
)?;
} else {
let parent_dir = if let Some(pxar_entry) = self.position[0].pxar.as_ref() {
pxar_entry.enter_directory().await?
} else {
bail!("missing pxar entry for archive root");
};
crate::pxar::tools::pxar_metadata_catalog_find(
parent_dir,
&[&pattern_entry],
&|path: &[u8]| -> Result<(), Error> {
let mut out = std::io::stdout();
out.write_all(path)?;
out.write_all(b"\n")?;
Ok(())
},
)
.await?;
}

if found_some && select {
self.selected.insert(pattern_os, pattern_entry);
@@ -945,6 +1083,18 @@ impl Shell {
}
}

fn parent_pxar_entry(dir_stack: &[PathStackEntry]) -> Result<&FileEntry, Error> {
if let Some(parent) = dir_stack.last().as_ref() {
if let Some(entry) = parent.pxar.as_ref() {
Ok(entry)
} else {
bail!("missing pxar entry for parent");
}
} else {
bail!("missing parent entry on stack");
}
}

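The helper above peels two layers of Option with explicit bail!s. An equivalent formulation using ok_or_else, shown as a sketch with stand-in types:

    use anyhow::{format_err, Error};

    // Stand-ins for the real pxar FileEntry and shell stack entry types.
    struct FileEntry;
    struct PathStackEntry {
        pxar: Option<FileEntry>,
    }

    // Resolve the last stack entry, then its pxar entry, failing with a
    // descriptive error at either step.
    fn parent_pxar_entry(dir_stack: &[PathStackEntry]) -> Result<&FileEntry, Error> {
        dir_stack
            .last()
            .ok_or_else(|| format_err!("missing parent entry on stack"))?
            .pxar
            .as_ref()
            .ok_or_else(|| format_err!("missing pxar entry for parent"))
    }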
struct ExtractorState<'a> {
path: Vec<u8>,
path_len: usize,
@@ -960,22 +1110,38 @@ struct ExtractorState<'a> {

extractor: crate::pxar::extract::Extractor,

catalog: &'a mut CatalogReader,
catalog: &'a mut Option<CatalogReader>,
match_list: &'a [MatchEntry],
accessor: &'a Accessor,
}

impl<'a> ExtractorState<'a> {
pub fn new(
catalog: &'a mut CatalogReader,
catalog: &'a mut Option<CatalogReader>,
dir_stack: Vec<PathStackEntry>,
extractor: crate::pxar::extract::Extractor,
match_list: &'a [MatchEntry],
accessor: &'a Accessor,
) -> Result<Self, Error> {
let read_dir = catalog
.read_dir(&dir_stack.last().unwrap().catalog)?
.into_iter();
let read_dir = if let Some(catalog) = catalog.as_mut() {
catalog
.read_dir(&dir_stack.last().unwrap().catalog)?
.into_iter()
} else {
let pxar_entry = parent_pxar_entry(&dir_stack)?;
let dir = block_on(pxar_entry.enter_directory())?;
let entries = block_on(crate::pxar::tools::pxar_metadata_read_dir(dir))?;

let mut catalog_entries = Vec::with_capacity(entries.len());
for entry in entries {
let entry_attr = DirEntryAttribute::try_from(&entry).unwrap();
catalog_entries.push(catalog::DirEntry {
name: entry.entry().file_name().as_bytes().into(),
attr: entry_attr,
});
}
catalog_entries.into_iter()
};
Ok(Self {
path: Vec::new(),
path_len: 0,
@@ -1053,11 +1219,29 @@ impl<'a> ExtractorState<'a> {
entry: catalog::DirEntry,
match_result: Option<MatchType>,
) -> Result<(), Error> {
let entry_iter = if let Some(catalog) = self.catalog.as_mut() {
catalog.read_dir(&entry)?.into_iter()
} else {
self.dir_stack.push(PathStackEntry::new(entry.clone()));
let dir = Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
self.dir_stack.pop();
let dir = dir.enter_directory().await?;
let entries = block_on(crate::pxar::tools::pxar_metadata_read_dir(dir))?;
entries
.into_iter()
.map(|entry| {
let entry_attr = DirEntryAttribute::try_from(&entry).unwrap();
catalog::DirEntry {
name: entry.entry().file_name().as_bytes().into(),
attr: entry_attr,
}
})
.collect::<Vec<catalog::DirEntry>>()
.into_iter()
};
// enter a new directory:
self.read_dir_stack.push(mem::replace(
&mut self.read_dir,
self.catalog.read_dir(&entry)?.into_iter(),
));
self.read_dir_stack
.push(mem::replace(&mut self.read_dir, entry_iter));
self.matches_stack.push(self.matches);
self.dir_stack.push(PathStackEntry::new(entry));
self.path_len_stack.push(self.path_len);

@@ -4,11 +4,11 @@ use std::time::Duration;

use anyhow::{bail, format_err, Error};
use futures::*;
use http::header::HeaderValue;
use http::Uri;
use http::{Request, Response};
use hyper::client::{Client, HttpConnector};
use hyper::Body;
use hyper::http::header::HeaderValue;
use hyper::http::Uri;
use hyper::http::{Request, Response};
use hyper::{body::HttpBody, Body};
use openssl::{
ssl::{SslConnector, SslMethod},
x509::X509StoreContextRef,
@@ -25,6 +25,7 @@ use proxmox_async::broadcast_future::BroadcastFuture;
use proxmox_http::client::HttpsConnector;
use proxmox_http::uri::{build_authority, json_object_to_query};
use proxmox_http::{ProxyConfig, RateLimiter};
use proxmox_log::{error, info, warn};

use pbs_api_types::percent_encoding::DEFAULT_ENCODE_SET;
use pbs_api_types::{Authid, RateLimitConfig, Userid};
@@ -348,14 +349,14 @@ impl HttpClient {
if let Err(err) =
store_fingerprint(prefix.as_ref().unwrap(), &server, &fingerprint)
{
log::error!("{}", err);
error!("{}", err);
}
}
*verified_fingerprint.lock().unwrap() = Some(fingerprint);
true
}
Err(err) => {
log::error!("certificate validation failed - {}", err);
error!("certificate validation failed - {}", err);
false
}
},
@@ -393,7 +394,7 @@ impl HttpClient {

let proxy_config = ProxyConfig::from_proxy_env()?;
if let Some(config) = proxy_config {
log::info!("Using proxy connection: {}:{}", config.host, config.port);
info!("Using proxy connection: {}:{}", config.host, config.port);
https.set_proxy(config);
}

@@ -461,14 +462,14 @@ impl HttpClient {
&auth.token,
) {
if std::io::stdout().is_terminal() {
log::error!("storing login ticket failed: {}", err);
error!("storing login ticket failed: {}", err);
}
}
}
*auth2.write().unwrap() = auth;
}
Err(err) => {
log::error!("re-authentication failed: {}", err);
error!("re-authentication failed: {}", err);
}
}
}
@@ -498,7 +499,7 @@ impl HttpClient {
&auth.token,
) {
if std::io::stdout().is_terminal() {
log::error!("storing login ticket failed: {}", err);
error!("storing login ticket failed: {}", err);
}
}
}
@@ -600,14 +601,14 @@ impl HttpClient {
if expected_fingerprint == fp_string {
return Ok(Some(fp_string));
} else {
log::warn!("WARNING: certificate fingerprint does not match expected fingerprint!");
log::warn!("expected: {}", expected_fingerprint);
warn!("WARNING: certificate fingerprint does not match expected fingerprint!");
warn!("expected: {}", expected_fingerprint);
}
}

// If we're on a TTY, query the user
if interactive && std::io::stdin().is_terminal() {
log::info!("fingerprint: {}", fp_string);
info!("fingerprint: {}", fp_string);
loop {
eprint!("Are you sure you want to continue connecting? (y/n): ");
let _ = std::io::stdout().flush();
@@ -705,8 +706,7 @@ impl HttpClient {
.map(|_| Err(format_err!("unknown error")))
.await?
} else {
resp.into_body()
.map_err(Error::from)
futures::TryStreamExt::map_err(resp.into_body(), Error::from)
.try_fold(output, move |acc, chunk| async move {
acc.write_all(&chunk)?;
Ok::<_, Error>(acc)
@@ -781,7 +781,7 @@ impl HttpClient {
.map_err(|_| format_err!("http upgrade request timed out"))??;
let status = resp.status();

if status != http::StatusCode::SWITCHING_PROTOCOLS {
if status != hyper::http::StatusCode::SWITCHING_PROTOCOLS {
Self::api_response(resp).await?;
bail!("unknown error");
}
@@ -790,14 +790,14 @@ impl HttpClient {

let max_window_size = (1 << 31) - 2;

let (h2, connection) = h2::client::Builder::new()
let (h2, connection) = h2::legacy::client::Builder::new()
.initial_connection_window_size(max_window_size)
.initial_window_size(max_window_size)
.max_frame_size(4 * 1024 * 1024)
.handshake(upgraded)
.await?;

let connection = connection.map_err(|_| log::error!("HTTP/2.0 connection failed"));
let connection = connection.map_err(|_| error!("HTTP/2.0 connection failed"));

let (connection, abort) = futures::future::abortable(connection);
// A cancellable future returns an Option which is None when cancelled and
@@ -843,7 +843,7 @@ impl HttpClient {

async fn api_response(response: Response<Body>) -> Result<Value, Error> {
let status = response.status();
let data = hyper::body::to_bytes(response.into_body()).await?;
let data = HttpBody::collect(response.into_body()).await?.to_bytes();

let text = String::from_utf8(data.to_vec()).unwrap();
if status.is_success() {
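api_response above switches from the free function hyper::body::to_bytes to the HttpBody trait's collect, matching the hyper::{body::HttpBody, Body} import added at the top of the file. The same conversion in isolation, assuming the hyper 0.14-era API this code targets:

    use hyper::body::{Body, HttpBody};

    // Collect a streaming body into contiguous bytes; equivalent in effect
    // to the removed hyper::body::to_bytes(body).await?.
    async fn body_to_bytes(body: Body) -> Result<bytes::Bytes, hyper::Error> {
        Ok(HttpBody::collect(body).await?.to_bytes())
    }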
@@ -935,11 +935,11 @@ impl Drop for HttpClient {

#[derive(Clone)]
pub struct H2Client {
h2: h2::client::SendRequest<bytes::Bytes>,
h2: h2::legacy::client::SendRequest<bytes::Bytes>,
}

impl H2Client {
pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
pub fn new(h2: h2::legacy::client::SendRequest<bytes::Bytes>) -> Self {
Self { h2 }
}

@@ -1019,7 +1019,7 @@ impl H2Client {
&self,
request: Request<()>,
data: Option<bytes::Bytes>,
) -> impl Future<Output = Result<h2::client::ResponseFuture, Error>> {
) -> impl Future<Output = Result<h2::legacy::client::ResponseFuture, Error>> {
self.h2
.clone()
.ready()
@@ -1036,7 +1036,9 @@ impl H2Client {
})
}

pub async fn h2api_response(response: Response<h2::RecvStream>) -> Result<Value, Error> {
pub async fn h2api_response(
response: Response<h2::legacy::RecvStream>,
) -> Result<Value, Error> {
let status = response.status();

let (_head, mut body) = response.into_parts();

@@ -1,13 +1,13 @@
use std::cmp;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{mpsc, Arc};
use std::sync::mpsc;
use std::task::{Context, Poll};

use anyhow::{anyhow, Error};
use futures::{ready, Stream};
use pin_project_lite::pin_project;

use crate::backup_stats::UploadCounters;
use crate::pxar::create::ReusableDynamicEntry;

pin_project! {
@@ -16,7 +16,7 @@ pin_project! {
input: S,
next_injection: Option<InjectChunks>,
injections: Option<mpsc::Receiver<InjectChunks>>,
stream_len: Arc<AtomicUsize>,
counters: UploadCounters,
}
}

@@ -42,7 +42,7 @@ pub trait InjectReusedChunks: Sized {
fn inject_reused_chunks(
self,
injections: Option<mpsc::Receiver<InjectChunks>>,
stream_len: Arc<AtomicUsize>,
counters: UploadCounters,
) -> InjectReusedChunksQueue<Self>;
}

@@ -53,13 +53,13 @@ where
fn inject_reused_chunks(
self,
injections: Option<mpsc::Receiver<InjectChunks>>,
stream_len: Arc<AtomicUsize>,
counters: UploadCounters,
) -> InjectReusedChunksQueue<Self> {
InjectReusedChunksQueue {
input: self,
next_injection: None,
injections,
stream_len,
counters,
}
}
}
@@ -85,7 +85,7 @@ where

if let Some(inject) = this.next_injection.take() {
// got reusable dynamic entries to inject
let offset = this.stream_len.load(Ordering::SeqCst) as u64;
let offset = this.counters.total_stream_len() as u64;

match inject.boundary.cmp(&offset) {
// inject now
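The stream adapter above stops carrying its own Arc<AtomicUsize> and instead reads the current stream length from the shared UploadCounters. The diff only shows total_stream_len(), so the following is a rough sketch of the idea rather than the real type: a cloneable handle around atomic counters:

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;

    // Rough sketch only: the real UploadCounters lives in backup_stats and
    // its fields and methods beyond total_stream_len() are not shown here.
    #[derive(Clone)]
    struct UploadCounters {
        stream_len: Arc<AtomicUsize>,
    }

    impl UploadCounters {
        fn add_stream_len(&self, len: usize) {
            self.stream_len.fetch_add(len, Ordering::SeqCst);
        }

        fn total_stream_len(&self) -> usize {
            self.stream_len.load(Ordering::SeqCst)
        }
    }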
@@ -9,6 +9,7 @@ pub mod tools;

mod inject_reused_chunks;
mod merge_known_chunks;
pub use merge_known_chunks::MergedChunkInfo;
pub mod pipe_to_stream;

mod http_client;
@@ -41,4 +42,7 @@ pub use backup_specification::*;
mod chunk_stream;
pub use chunk_stream::{ChunkStream, FixedChunkStream, InjectionData};

mod backup_stats;
pub use backup_stats::BackupStats;

pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;

@@ -8,7 +8,7 @@ use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use bytes::Bytes;
use futures::{ready, Future};
use h2::SendStream;
use h2::legacy::SendStream;

pub struct PipeToSendStream {
body_tx: SendStream<Bytes>,

@@ -27,6 +27,7 @@ use pxar::{EntryKind, Metadata, PxarVariant};

use proxmox_human_byte::HumanByte;
use proxmox_io::vec;
use proxmox_log::{debug, error, info, warn};
use proxmox_sys::fs::{self, acl, xattr};

use pbs_datastore::catalog::BackupCatalogWriter;
@@ -62,7 +63,7 @@ pub struct PxarCreateOptions {

pub type MetadataArchiveReader = Arc<dyn ReadAt + Send + Sync + 'static>;

/// Statefull information of previous backups snapshots for partial backups
/// Stateful information of previous backups snapshots for partial backups
pub struct PxarPrevRef {
/// Reference accessor for metadata comparison
pub accessor: Accessor<MetadataArchiveReader>,
@@ -72,7 +73,7 @@ pub struct PxarPrevRef {
pub archive_name: String,
}

fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
fn detect_fs_type(fd: RawFd) -> Result<i64, Errno> {
let mut fs_stat = std::mem::MaybeUninit::uninit();
let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) };
Errno::result(res)?;
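Narrowing detect_fs_type's error type from anyhow::Error to nix's Errno is what lets callers match on specific OS errors, as the archiver now does for stale NFS file handles. A small illustration (the function body and its magic constant are made up for the example):

    use nix::errno::Errno;

    // Returning Errno instead of a boxed error keeps the concrete OS error
    // available for matching at the call site.
    fn detect_fs_type_sketch(ok: bool) -> Result<i64, Errno> {
        if ok {
            Ok(0x9123683e) // e.g. BTRFS_SUPER_MAGIC
        } else {
            Err(Errno::ESTALE)
        }
    }

    fn use_it() -> Result<(), anyhow::Error> {
        match detect_fs_type_sketch(false) {
            Ok(magic) => println!("fs magic: {magic:#x}"),
            Err(Errno::ESTALE) => println!("stale file handle, skipping"),
            Err(err) => return Err(err.into()),
        }
        Ok(())
    }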
@@ -315,25 +316,25 @@ where
encoder.close().await?;

if metadata_mode {
log::info!("Change detection summary:");
log::info!(
info!("Change detection summary:");
info!(
" - {} total files ({} hardlinks)",
archiver.reuse_stats.files_reused_count
+ archiver.reuse_stats.files_reencoded_count
+ archiver.reuse_stats.files_hardlink_count,
archiver.reuse_stats.files_hardlink_count,
);
log::info!(
info!(
" - {} unchanged, reusable files with {} data",
archiver.reuse_stats.files_reused_count,
HumanByte::from(archiver.reuse_stats.total_reused_payload_size),
);
log::info!(
info!(
" - {} changed or non-reusable files with {} data",
archiver.reuse_stats.files_reencoded_count,
HumanByte::from(archiver.reuse_stats.total_reencoded_size),
);
log::info!(
info!(
" - {} padding in {} partially reused chunks",
HumanByte::from(
archiver.reuse_stats.total_injected_size
@@ -422,6 +423,7 @@ impl Archiver {
previous_metadata_accessor: &Option<Directory<MetadataArchiveReader>>,
file_name: &Path,
metadata: &Metadata,
file_size: u64,
) -> Result<Option<Range<u64>>, Error> {
if let Some(previous_metadata_accessor) = previous_metadata_accessor {
if let Some(file_entry) = previous_metadata_accessor.lookup(file_name).await? {
@@ -432,20 +434,23 @@ impl Archiver {
..
} = file_entry.entry().kind()
{
if file_size != *size {
return Ok(None);
}
let range =
*offset..*offset + size + size_of::<pxar::format::Header>() as u64;
log::debug!(
debug!(
"reusable: {file_name:?} at range {range:?} has unchanged metadata."
);
return Ok(Some(range));
}
log::debug!("reencode: {file_name:?} not a regular file.");
debug!("re-encode: {file_name:?} not a regular file.");
return Ok(None);
}
log::debug!("reencode: {file_name:?} metadata did not match.");
debug!("re-encode: {file_name:?} metadata did not match.");
return Ok(None);
}
log::debug!("reencode: {file_name:?} not found in previous archive.");
debug!("re-encode: {file_name:?} not found in previous archive.");
}

Ok(None)
@@ -476,12 +481,16 @@ impl Archiver {
Ok(fd) => Ok(Some(fd)),
Err(Errno::ENOENT) => {
if existed {
self.report_vanished_file()?;
self.report_vanished_file();
}
Ok(None)
}
Err(Errno::EACCES) => {
log::warn!("failed to open file: {:?}: access denied", file_name);
warn!("failed to open file: {:?}: access denied", file_name);
Ok(None)
}
Err(Errno::ESTALE) => {
self.report_stale_file_handle(None);
Ok(None)
}
Err(Errno::EPERM) if !noatime.is_empty() => {
@@ -511,10 +520,9 @@ impl Archiver {
let line = match line {
Ok(line) => line,
Err(err) => {
log::warn!(
warn!(
"ignoring .pxarexclude after read error in {:?}: {}",
self.path,
err,
self.path, err,
);
self.patterns.truncate(old_pattern_count);
return Ok(());
@@ -554,7 +562,7 @@ impl Archiver {
}
}
Err(err) => {
log::error!("bad pattern in {:?}: {}", self.path, err);
error!("bad pattern in {:?}: {}", self.path, err);
}
}
}
@@ -635,18 +643,36 @@ impl Archiver {
});

match match_result {
Ok(Some(MatchType::Exclude)) => continue,
Ok(Some(MatchType::Exclude)) => {
debug!("matched by exclude pattern '{full_path:?}'");
continue;
}
Ok(_) => (),
Err(err) if err.not_found() => continue,
Err(Errno::ESTALE) => {
self.report_stale_file_handle(Some(&full_path));
continue;
}
Err(err) => {
return Err(err).with_context(|| format!("stat failed on {full_path:?}"))
}
}

let stat = stat_results
.map(Ok)
.unwrap_or_else(get_file_mode)
.with_context(|| format!("stat failed on {full_path:?}"))?;
let stat = match stat_results {
Some(mode) => mode,
None => match get_file_mode() {
Ok(mode) => mode,
Err(Errno::ESTALE) => {
self.report_stale_file_handle(Some(&full_path));
continue;
}
Err(err) => {
return Err(
Error::from(err).context(format!("stat failed on {full_path:?}"))
)
}
},
};

self.entry_counter += 1;
if self.entry_counter > self.entry_limit {
@@ -668,25 +694,27 @@ impl Archiver {
Ok(file_list)
}

fn report_vanished_file(&mut self) -> Result<(), Error> {
log::warn!("warning: file vanished while reading: {:?}", self.path);
Ok(())
fn report_stale_file_handle(&self, path: Option<&PathBuf>) {
let path = path.unwrap_or(&self.path);
warn!("warning: stale file handle encountered while reading: {path:?}");
}

fn report_file_shrunk_while_reading(&mut self) -> Result<(), Error> {
log::warn!(
fn report_vanished_file(&self) {
warn!("warning: file vanished while reading: {:?}", self.path);
}

fn report_file_shrunk_while_reading(&self) {
warn!(
"warning: file size shrunk while reading: {:?}, file will be padded with zeros!",
self.path,
);
Ok(())
}

fn report_file_grew_while_reading(&mut self) -> Result<(), Error> {
log::warn!(
fn report_file_grew_while_reading(&self) {
warn!(
"warning: file size increased while reading: {:?}, file will be truncated!",
self.path,
);
Ok(())
}

async fn add_entry<T: SeqWrite + Send>(
@@ -716,23 +744,23 @@ impl Archiver {
None => return Ok(()),
};

let match_path = PathBuf::from("/").join(self.path.clone());
if self
.patterns
.matches(match_path.as_os_str().as_bytes(), stat.st_mode)?
== Some(MatchType::Exclude)
{
return Ok(());
}

let metadata = get_metadata(
let metadata = match get_metadata(
fd.as_raw_fd(),
stat,
self.flags(),
self.fs_magic,
&mut self.fs_feature_flags,
self.skip_e2big_xattr,
)?;
) {
Ok(metadata) => metadata,
Err(err) => {
if let Some(Errno::ESTALE) = err.downcast_ref::<Errno>() {
self.report_stale_file_handle(None);
return Ok(());
}
return Err(err);
}
};

if self.previous_payload_index.is_none() {
return self
@@ -742,7 +770,7 @@ impl Archiver {

// Avoid having too many open file handles in cached entries
if self.cache.is_full() {
log::debug!("Max cache size reached, reuse cached entries");
debug!("Max cache size reached, reuse cached entries");
self.flush_cached_reusing_if_below_threshold(encoder, true)
.await?;
}
@@ -774,12 +802,13 @@ impl Archiver {
}

let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
let file_size = stat.st_size as u64;
if let Some(payload_range) = self
.is_reusable_entry(previous_metadata, file_name, &metadata)
.is_reusable_entry(previous_metadata, file_name, &metadata, file_size)
.await?
{
if !self.cache.try_extend_range(payload_range.clone()) {
log::debug!("Cache range has hole, new range: {payload_range:?}");
debug!("Cache range has hole, new range: {payload_range:?}");
self.flush_cached_reusing_if_below_threshold(encoder, true)
.await?;
// range has to be set after flushing of cached entries, which resets the range
@@ -790,7 +819,7 @@ impl Archiver {
// actual chunks, which needs to be added before encoding the payload reference
let offset =
PayloadOffset::default().add(payload_range.start - self.cache.range().start);
log::debug!("Offset relative to range start: {offset:?}");
debug!("Offset relative to range start: {offset:?}");

self.cache.insert(
fd,
@@ -842,6 +871,7 @@ impl Archiver {
.await
}

#[allow(clippy::too_many_arguments)]
async fn add_entry_to_archive<T: SeqWrite + Send>(
&mut self,
encoder: &mut Encoder<'_, T>,
@@ -994,7 +1024,7 @@ impl Archiver {
// do not reuse chunks if introduced padding higher than threshold
// opt for re-encoding in that case
if ratio > CHUNK_PADDING_THRESHOLD {
log::debug!(
debug!(
"Padding ratio: {ratio} > {CHUNK_PADDING_THRESHOLD}, padding: {}, total {}, chunks: {}",
HumanByte::from(padding),
HumanByte::from(total_size),
@@ -1003,7 +1033,7 @@ impl Archiver {
self.cache.update_last_chunk(prev_last_chunk);
self.encode_entries_to_archive(encoder, None).await?;
} else {
log::debug!(
debug!(
"Padding ratio: {ratio} < {CHUNK_PADDING_THRESHOLD}, padding: {}, total {}, chunks: {}",
HumanByte::from(padding),
HumanByte::from(total_size),
@@ -1039,7 +1069,7 @@ impl Archiver {
}

// Take ownership of cached entries and encode them to the archive
// Encode with reused payload chunks when base offset is some, reencode otherwise
// Encode with reused payload chunks when base offset is some, re-encode otherwise
async fn encode_entries_to_archive<T: SeqWrite + Send>(
&mut self,
encoder: &mut Encoder<'_, T>,
@@ -1054,7 +1084,7 @@ impl Archiver {
let (entries, start_path) = self.cache.take_and_reset();
let old_path = self.path.clone();
self.path = start_path;
log::debug!(
debug!(
"Got {} cache entries to encode: reuse is {}",
entries.len(),
base_offset.is_some()
@@ -1123,7 +1153,7 @@ impl Archiver {
let mut size = PayloadOffset::default();

for chunk in chunks.iter() {
log::debug!(
debug!(
"Injecting chunk with {} padding (chunk size {})",
HumanByte::from(chunk.padding),
HumanByte::from(chunk.size()),
@@ -1151,7 +1181,7 @@ impl Archiver {
};

injection_boundary = injection_boundary.add(size.raw());
log::debug!("Advance payload position by: {size:?}");
debug!("Advance payload position by: {size:?}");
encoder.advance(size)?;
}

@@ -1169,20 +1199,20 @@ impl Archiver {
) -> Result<(), Error> {
let dir_name = OsStr::from_bytes(c_dir_name.to_bytes());

if !self.cache.caching_enabled() {
if let Some(ref catalog) = self.catalog {
catalog.lock().unwrap().start_directory(c_dir_name)?;
}
encoder.create_directory(dir_name, metadata).await?;
}

let old_fs_magic = self.fs_magic;
let old_fs_feature_flags = self.fs_feature_flags;
let old_st_dev = self.current_st_dev;

let mut skip_contents = false;
if old_st_dev != stat.st_dev {
self.fs_magic = detect_fs_type(dir.as_raw_fd())?;
match detect_fs_type(dir.as_raw_fd()) {
Ok(fs_magic) => self.fs_magic = fs_magic,
Err(Errno::ESTALE) => {
self.report_stale_file_handle(None);
return Ok(());
}
Err(err) => return Err(err.into()),
}
self.fs_feature_flags = Flags::from_magic(self.fs_magic);
self.current_st_dev = stat.st_dev;

@@ -1193,8 +1223,15 @@ impl Archiver {
}
}

if !self.cache.caching_enabled() {
if let Some(ref catalog) = self.catalog {
catalog.lock().unwrap().start_directory(c_dir_name)?;
}
encoder.create_directory(dir_name, metadata).await?;
}

let result = if skip_contents {
log::info!("skipping mount point: {:?}", self.path);
info!("skipping mount point: {:?}", self.path);
Ok(())
} else {
let mut dir_accessor = None;
@@ -1245,14 +1282,14 @@ impl Archiver {
Err(err) => bail!(err),
};
if got as u64 > remaining {
self.report_file_grew_while_reading()?;
self.report_file_grew_while_reading();
got = remaining as usize;
}
out.write_all(&self.file_copy_buffer[..got]).await?;
remaining -= got as u64;
}
if remaining > 0 {
self.report_file_shrunk_while_reading()?;
self.report_file_shrunk_while_reading();
let to_zero = remaining.min(self.file_copy_buffer.len() as u64) as usize;
vec::clear(&mut self.file_copy_buffer[..to_zero]);
while remaining != 0 {
@@ -1272,7 +1309,14 @@ impl Archiver {
file_name: &Path,
metadata: &Metadata,
) -> Result<(), Error> {
let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?;
let dest = match nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..]) {
Ok(dest) => dest,
Err(Errno::ESTALE) => {
self.report_stale_file_handle(None);
return Ok(());
}
Err(err) => return Err(err.into()),
};
encoder.add_symlink(metadata, file_name, dest).await?;
Ok(())
}

@@ -57,7 +57,7 @@ impl PxarDir {
let dir = Dir::openat(
parent,
self.file_name.as_os_str(),
OFlag::O_DIRECTORY,
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
Mode::empty(),
)?;


@@ -22,6 +22,7 @@ use pxar::format::Device;
use pxar::{Entry, EntryKind, Metadata};

use proxmox_io::{sparse_copy, sparse_copy_async};
use proxmox_log::{debug, error, info};
use proxmox_sys::c_result;
use proxmox_sys::fs::{create_path, CreateOptions};

@@ -29,8 +30,8 @@ use proxmox_compression::zip::{ZipEncoder, ZipEntry};

use crate::pxar::dir_stack::PxarDirStack;
use crate::pxar::metadata;
use crate::pxar::tools::handle_root_with_optional_format_version_prelude;
use crate::pxar::Flags;
use crate::tools::handle_root_with_optional_format_version_prelude;

pub struct PxarExtractOptions<'a> {
pub match_list: &'a [MatchEntry],
@@ -132,18 +133,27 @@ where

if let Some(ref path) = options.prelude_path {
if let Some(entry) = prelude {
let mut prelude_file = OpenOptions::new()
.create(true)
.write(true)
let overwrite = options.overwrite_flags.contains(OverwriteFlags::FILE);

let mut open_options = OpenOptions::new();
open_options.write(true);
if overwrite {
open_options.create(true);
open_options.truncate(true);
} else {
open_options.create_new(true);
}

let mut prelude_file = open_options
.open(path)
.with_context(|| format!("error creating prelude file '{path:?}'"))?;
if let pxar::EntryKind::Prelude(ref prelude) = entry.kind() {
prelude_file.write_all(prelude.as_ref())?;
} else {
log::info!("unexpected entry kind for prelude");
info!("unexpected entry kind for prelude");
}
} else {
log::info!("No prelude entry found, skip prelude restore.");
info!("No prelude entry found, skip prelude restore.");
}
}

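The prelude restore above maps an overwrite flag onto OpenOptions: with overwrite enabled an existing file is truncated, while create_new makes the open fail if the target already exists. The mapping in isolation (a plain bool replaces the real OverwriteFlags check):

    use std::fs::OpenOptions;

    // With overwrite, an existing file is replaced; without it, opening an
    // existing file errors out instead of silently clobbering it.
    fn prelude_open_options(overwrite: bool) -> OpenOptions {
        let mut open_options = OpenOptions::new();
        open_options.write(true);
        if overwrite {
            open_options.create(true).truncate(true);
        } else {
            open_options.create_new(true);
        }
        open_options
    }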
@@ -195,30 +205,29 @@ where
}
}

impl<'a, T, F> Iterator for ExtractorIter<'a, T, F>
impl<T, F> Iterator for ExtractorIter<'_, T, F>
where
T: pxar::decoder::SeqRead,
F: FnMut(&Path),
{
type Item = Result<(), Error>;

/// Performs the extraction of [`Entries`][E] yielded by the [`Decoder`][D].
/// Performs the extraction of [`Entries`][Entry] yielded by the [`Decoder`][D].
///
/// In detail, the [`ExtractorIter`] will stop if and only if one of the
/// following conditions is true:
/// * The [`Decoder`][D] is exhausted
/// * The [`Decoder`][D] failed to read from the archive and consequently
/// yielded an [`io::Error`]
/// * The [`Entry`][E]'s filename is invalid (contains nul bytes or a slash)
/// * The [`Entry`]'s filename is invalid (contains nul bytes or a slash)
///
/// Should an error occur during any point of extraction (**not** while
/// fetching the next [`Entry`][E]), the error may be handled by the
/// fetching the next [`Entry`]), the error may be handled by the
/// [`ErrorHandler`] provided by the [`PxarExtractOptions`] used to
/// initialize the iterator.
///
/// Extraction errors will have a corresponding [`PxarExtractContext`] attached.
///
/// [E]: pxar::Entry
/// [D]: pxar::decoder::Decoder
fn next(&mut self) -> Option<Self::Item> {
if self.state.end_reached {
@@ -724,7 +733,10 @@ impl Extractor {
self.feature_flags,
metadata,
file.as_raw_fd(),
self.dir_stack.path(),
&self
.dir_stack
.path()
.join(file_name.to_string_lossy().to_string()),
&mut self.on_error,
)
}
@@ -783,7 +795,10 @@ impl Extractor {
self.feature_flags,
metadata,
file.as_raw_fd(),
self.dir_stack.path(),
&self
.dir_stack
.path()
.join(file_name.to_string_lossy().to_string()),
&mut self.on_error,
)
}
@@ -796,9 +811,9 @@ fn add_metadata_to_header(header: &mut tar::Header, metadata: &Metadata) {
header.set_gid(metadata.stat.gid as u64);
}

async fn tar_add_file<'a, W, T>(
async fn tar_add_file<W, T>(
tar: &mut proxmox_compression::tar::Builder<W>,
contents: Option<Contents<'a, T>>,
contents: Option<Contents<'_, T>>,
size: u64,
metadata: &Metadata,
path: &Path,
@@ -881,7 +896,7 @@ where
let metadata = realfile.entry().metadata();
let realpath = Path::new(link);

log::debug!("adding '{}' to tar", path.display());
debug!("adding '{}' to tar", path.display());

let stripped_path = match realpath.strip_prefix(prefix) {
Ok(path) => path,
@@ -910,7 +925,7 @@ where
}
}
EntryKind::Symlink(link) if !link.data.is_empty() => {
log::debug!("adding '{}' to tar", path.display());
debug!("adding '{}' to tar", path.display());
let realpath = Path::new(link);
let mut header = tar::Header::new_gnu();
header.set_entry_type(tar::EntryType::Symlink);
@@ -922,7 +937,7 @@ where
.context("could not send symlink entry")?;
}
EntryKind::Fifo => {
log::debug!("adding '{}' to tar", path.display());
debug!("adding '{}' to tar", path.display());
let mut header = tar::Header::new_gnu();
header.set_entry_type(tar::EntryType::Fifo);
add_metadata_to_header(&mut header, metadata);
@@ -936,7 +951,7 @@ where
.context("could not send fifo entry")?;
}
EntryKind::Directory => {
log::debug!("adding '{}' to tar", path.display());
debug!("adding '{}' to tar", path.display());
// we cannot add the root path itself
if path != Path::new("/") {
let mut header = tar::Header::new_gnu();
@@ -951,7 +966,7 @@ where
}
}
EntryKind::Device(device) => {
log::debug!("adding '{}' to tar", path.display());
debug!("adding '{}' to tar", path.display());
let entry_type = if metadata.stat.is_chardev() {
tar::EntryType::Char
} else {
@@ -974,7 +989,7 @@ where
}

tarencoder.finish().await.map_err(|err| {
log::error!("error during finishing of zip: {}", err);
error!("error during finishing of zip: {}", err);
err
})?;
Ok(())
@@ -1023,7 +1038,7 @@ where

match entry.kind() {
EntryKind::File { .. } => {
log::debug!("adding '{}' to zip", path.display());
debug!("adding '{}' to zip", path.display());
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
@@ -1042,7 +1057,7 @@ where
.with_context(|| format!("error looking up {:?}", path))?;
let realfile = accessor.follow_hardlink(&entry).await?;
let metadata = realfile.entry().metadata();
log::debug!("adding '{}' to zip", path.display());
debug!("adding '{}' to zip", path.display());
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
@@ -1055,7 +1070,7 @@ where
.context("could not send file entry")?;
}
EntryKind::Directory => {
log::debug!("adding '{}' to zip", path.display());
debug!("adding '{}' to zip", path.display());
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
@@ -1145,7 +1160,7 @@ where
let mut extractor = get_extractor(destination, root.metadata().clone())?;

if let Err(err) = seq_files_extractor(&mut extractor, decoder).await {
log::error!("error extracting pxar archive: {}", err);
error!("error extracting pxar archive: {}", err);
}

Ok(())
@@ -1209,7 +1224,7 @@ where
let metadata = entry.metadata();
let (file_name_os, file_name) = get_filename(entry)?;

log::debug!("extracting: {}", file.path().display());
debug!("extracting: {}", file.path().display());

match file.kind() {
EntryKind::Directory => {
@@ -1261,7 +1276,7 @@ where
let (file_name_os, file_name) = get_filename(&entry)?;

if !matches!(entry.kind(), EntryKind::GoodbyeTable) {
log::debug!("extracting: {}", entry.path().display());
debug!("extracting: {}", entry.path().display());
}

if let Err(err) = async {
@@ -1297,13 +1312,13 @@ where
}
.await
{
let display = entry.path().display().to_string();
log::error!(
let display_string = entry.path().display().to_string();
error!(
"error extracting {}: {}",
if matches!(entry.kind(), EntryKind::GoodbyeTable) {
"<directory>"
} else {
&display
&display_string
},
err
);

@@ -2,13 +2,14 @@ use std::ffi::{CStr, CString};
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::Path;

use anyhow::{anyhow, bail, Context, Error};
use anyhow::{anyhow, Context, Error};
use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;

use pxar::Metadata;

use proxmox_log::{info, warn};
use proxmox_sys::c_result;
use proxmox_sys::error::SysError;
use proxmox_sys::fs::{self, acl, xattr};
@@ -72,7 +73,13 @@ pub fn apply_at(
Mode::empty(),
)?;

apply(flags, metadata, fd.as_raw_fd(), path_info, on_error)
apply(
flags,
metadata,
fd.as_raw_fd(),
&path_info.join(file_name.to_string_lossy().to_string()),
on_error,
)
}

pub fn apply_initial_flags(
@@ -215,7 +222,7 @@ fn apply_xattrs(
}

if !xattr::is_valid_xattr_name(xattr.name()) {
log::info!("skipping invalid xattr named {:?}", xattr.name());
info!("skipping invalid xattr named {:?}", xattr.name());
continue;
}

@@ -276,7 +283,7 @@ fn apply_acls(
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, mode)?;

if !metadata.acl.users.is_empty() || !metadata.acl.groups.is_empty() {
log::warn!(
warn!(
"Warning: {:?}: Missing GROUP_OBJ entry in ACL, resetting to value of MASK",
path_info,
);
@@ -294,7 +301,7 @@ fn apply_acls(
}

if !acl.is_valid() {
bail!("Error while restoring ACL - ACL invalid");
warn!("Warning: {path_info:?} - ACL invalid, attempting restore anyway..");
}

acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
@@ -323,7 +330,7 @@ fn apply_acls(
}

if !acl.is_valid() {
bail!("Error while restoring ACL - ACL invalid");
warn!("Warning: {path_info:?} - ACL invalid, attempting restore anyway..");
}

acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;

@@ -32,7 +32,7 @@
//!
//! * `FILENAME` -- name of the first directory entry (strictly ordered!)
//! * `<archive>` -- serialization of the first directory entry's metadata and contents,
//! following the exact same archive format
//! following the exact same archive format
//! * `FILENAME` -- name of the second directory entry (strictly ordered!)
//! * `<archive>` -- serialization of the second directory entry
//! * ...
@@ -52,7 +52,7 @@ pub(crate) mod dir_stack;
pub(crate) mod extract;
pub(crate) mod look_ahead_cache;
pub(crate) mod metadata;
pub(crate) mod tools;
pub mod tools;

mod flags;
pub use flags::Flags;
@@ -69,5 +69,3 @@ pub use extract::{
/// memory, so we restrict the number of allowed entries to limit
/// maximum memory usage.
pub const ENCODER_MAX_ENTRIES: usize = 1024 * 1024;

pub use tools::{format_multi_line_entry, format_single_line_entry};

@@ -2,15 +2,31 @@

use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use anyhow::{bail, Context, Error};
use anyhow::{bail, format_err, Context, Error};
use nix::sys::stat::Mode;

use pxar::{format::StatxTimestamp, mode, Entry, EntryKind, Metadata};
use pathpatterns::MatchType;
use pxar::accessor::aio::{Accessor, Directory, FileEntry};
use pxar::accessor::ReadAt;
use pxar::format::StatxTimestamp;
use pxar::{mode, Entry, EntryKind, Metadata};

use pbs_api_types::BackupArchiveName;
use pbs_datastore::catalog::{ArchiveEntry, CatalogEntryType, DirEntryAttribute};

use pbs_datastore::dynamic_index::{BufferedDynamicReader, LocalDynamicReadAt};
use pbs_datastore::index::IndexFile;
use pbs_datastore::BackupManifest;
use pbs_tools::crypt_config::CryptConfig;
use proxmox_log::{debug, info};

use crate::{BackupReader, RemoteChunkReader};

/// Get the file permissions as `nix::Mode`
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
pub(crate) fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
let mode = meta.stat.get_permission_bits();

u32::try_from(mode)
@@ -22,12 +38,14 @@ pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
}

/// Make sure path is relative and not '.' or '..'.
pub fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
pub(crate) fn assert_relative_path<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
assert_relative_path_do(Path::new(path))
}

/// Make sure path is a single component and not '.' or '..'.
pub fn assert_single_path_component<S: AsRef<OsStr> + ?Sized>(path: &S) -> Result<(), Error> {
pub(crate) fn assert_single_path_component<S: AsRef<OsStr> + ?Sized>(
path: &S,
) -> Result<(), Error> {
assert_single_path_component_do(Path::new(path))
}

@@ -171,30 +189,23 @@ pub fn format_multi_line_entry(entry: &Entry) -> String {

let meta = entry.metadata();

let (size, link, type_name, payload_offset) = match entry.kind() {
EntryKind::Version(version) => (format!("{version:?}"), String::new(), "version", None),
let (size, link, type_name) = match entry.kind() {
EntryKind::Version(version) => (format!("{version:?}"), String::new(), "version"),
EntryKind::Prelude(prelude) => (
"0".to_string(),
format!("raw data: {:?} bytes", prelude.data.len()),
"prelude",
None,
),
EntryKind::File {
size,
payload_offset,
..
} => (format!("{}", *size), String::new(), "file", *payload_offset),
EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
EntryKind::Symlink(link) => (
"0".to_string(),
format!(" -> {:?}", link.as_os_str()),
"symlink",
None,
),
EntryKind::Hardlink(link) => (
"0".to_string(),
format!(" -> {:?}", link.as_os_str()),
"symlink",
None,
),
EntryKind::Device(dev) => (
format!("{},{}", dev.major, dev.minor),
@@ -206,12 +217,11 @@ pub fn format_multi_line_entry(entry: &Entry) -> String {
} else {
"device"
},
None,
),
EntryKind::Socket => ("0".to_string(), String::new(), "socket", None),
EntryKind::Fifo => ("0".to_string(), String::new(), "fifo", None),
EntryKind::Directory => ("0".to_string(), String::new(), "directory", None),
EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry", None),
EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
};

let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
@@ -219,39 +229,230 @@ pub fn format_multi_line_entry(entry: &Entry) -> String {
Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
};

if let Some(offset) = payload_offset {
format!(
" File: {}{}\n \
Size: {:<13} Type: {}\n\
Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\
Modify: {}\n
PayloadOffset: {}\n",
file_name,
link,
size,
type_name,
meta.file_mode(),
mode_string,
meta.stat.uid,
meta.stat.gid,
format_mtime(&meta.stat.mtime),
offset,
)
format!(
" File: {}{}\n \
Size: {:<13} Type: {}\n\
Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\
Modify: {}\n",
file_name,
link,
size,
type_name,
meta.file_mode(),
mode_string,
meta.stat.uid,
meta.stat.gid,
format_mtime(&meta.stat.mtime),
)
}

/// Look up the directory entries of the given directory `path` in a pxar archive via its given
/// `accessor` and return the entries formatted as [`ArchiveEntry`]'s, compatible with reading
/// entries from the catalog.
///
/// If the optional `path_prefix` is given, all returned entry paths will be prefixed with it.
pub async fn pxar_metadata_catalog_lookup<T: Clone + ReadAt>(
accessor: Accessor<T>,
path: &OsStr,
path_prefix: Option<&str>,
) -> Result<Vec<ArchiveEntry>, Error> {
let root = accessor.open_root().await?;
let dir_entry = root
.lookup(&path)
.await
.map_err(|err| format_err!("lookup failed - {err}"))?
.ok_or_else(|| format_err!("lookup failed - error opening '{path:?}'"))?;

let mut entries = Vec::new();
if let EntryKind::Directory = dir_entry.kind() {
let dir_entry = dir_entry
.enter_directory()
.await
.map_err(|err| format_err!("failed to enter directory - {err}"))?;

let mut entries_iter = dir_entry.read_dir();
while let Some(entry) = entries_iter.next().await {
let entry = entry?.decode_entry().await?;

let entry_attr = match DirEntryAttribute::try_from(&entry) {
Ok(attr) => attr,
Err(_) => continue,
};

let entry_path = crate::pxar::tools::entry_path_with_prefix(&entry, path_prefix);
entries.push(ArchiveEntry::new(
entry_path.as_os_str().as_bytes(),
Some(&entry_attr),
));
}
} else {
format!(
" File: {}{}\n \
Size: {:<13} Type: {}\n\
Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\
Modify: {}\n",
file_name,
link,
size,
type_name,
meta.file_mode(),
mode_string,
meta.stat.uid,
meta.stat.gid,
format_mtime(&meta.stat.mtime),
)
bail!(format!(
"expected directory entry, got entry kind '{:?}'",
dir_entry.kind()
));
}

Ok(entries)
}

/// Decode possible format version and prelude entries before getting the root directory
/// entry.
///
/// Returns the root directory entry and, if present, the prelude entry
pub fn handle_root_with_optional_format_version_prelude<R: pxar::decoder::SeqRead>(
decoder: &mut pxar::decoder::sync::Decoder<R>,
) -> Result<(pxar::Entry, Option<pxar::Entry>), Error> {
let first = decoder
.next()
.ok_or_else(|| format_err!("missing root entry"))??;
match first.kind() {
pxar::EntryKind::Directory => {
let version = pxar::format::FormatVersion::Version1;
debug!("pxar format version '{version:?}'");
Ok((first, None))
}
pxar::EntryKind::Version(version) => {
debug!("pxar format version '{version:?}'");
let second = decoder
.next()
.ok_or_else(|| format_err!("missing root entry"))??;
match second.kind() {
pxar::EntryKind::Directory => Ok((second, None)),
pxar::EntryKind::Prelude(_prelude) => {
let third = decoder
.next()
.ok_or_else(|| format_err!("missing root entry"))??;
Ok((third, Some(second)))
}
_ => bail!("unexpected entry kind {:?}", second.kind()),
}
}
_ => bail!("unexpected entry kind {:?}", first.kind()),
}
}
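A hypothetical call site for the decode helper above, feeding it a synchronous pxar decoder built from any std::io::Read source (Decoder::from_std is assumed here):

    // Illustration only: read the root entry and report whether the archive
    // carries a prelude. `reader` is any std::io::Read over a pxar archive.
    fn read_root<R: std::io::Read>(reader: R) -> Result<(), anyhow::Error> {
        let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
        let (root, prelude) =
            handle_root_with_optional_format_version_prelude(&mut decoder)?;
        println!("root: {:?}, prelude present: {}", root.path(), prelude.is_some());
        Ok(())
    }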

pub async fn get_remote_pxar_reader(
archive_name: &BackupArchiveName,
client: Arc<BackupReader>,
manifest: &BackupManifest,
crypt_config: Option<Arc<CryptConfig>>,
) -> Result<(LocalDynamicReadAt<RemoteChunkReader>, u64), Error> {
let index = client
.download_dynamic_index(manifest, archive_name)
.await?;
let most_used = index.find_most_used_chunks(8);

let file_info = manifest.lookup_file_info(archive_name)?;
let chunk_reader = RemoteChunkReader::new(
client.clone(),
crypt_config,
file_info.chunk_crypt_mode(),
most_used,
);

let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size();

Ok((LocalDynamicReadAt::new(reader), archive_size))
}

/// Generate entry path for given [`FileEntry`], prefixed by given `path_prefix` component(s).
pub(crate) fn entry_path_with_prefix<T: Clone + ReadAt>(
entry: &FileEntry<T>,
path_prefix: Option<&str>,
) -> PathBuf {
if let Some(prefix) = path_prefix {
let mut entry_path = PathBuf::from(prefix);
match entry.path().strip_prefix("/") {
Ok(path) => entry_path.push(path),
Err(_) => entry_path.push(entry.path()),
}
entry_path
} else {
PathBuf::from(entry.path())
}
}
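A behavioral sketch of the prefixing rule implemented above: absolute archive paths lose their leading '/' before the prefix is prepended (plain strings replace the real FileEntry type):

    use std::path::PathBuf;

    // Same rule as entry_path_with_prefix, operating on plain strings.
    fn with_prefix(path: &str, prefix: Option<&str>) -> PathBuf {
        match prefix {
            Some(prefix) => {
                let mut out = PathBuf::from(prefix);
                out.push(path.strip_prefix('/').unwrap_or(path));
                out
            }
            None => PathBuf::from(path),
        }
    }

    fn main() {
        assert_eq!(
            with_prefix("/etc/hosts", Some("root.pxar")),
            PathBuf::from("root.pxar/etc/hosts")
        );
        assert_eq!(with_prefix("/etc/hosts", None), PathBuf::from("/etc/hosts"));
    }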

/// Read a sorted list of pxar archive entries from given parent entry via the pxar accessor.
pub(crate) async fn pxar_metadata_read_dir<T: Clone + Send + Sync + ReadAt>(
parent_dir: Directory<T>,
) -> Result<Vec<FileEntry<T>>, Error> {
let mut entries_iter = parent_dir.read_dir();
let mut entries = Vec::new();
while let Some(entry) = entries_iter.next().await {
let entry = entry?.decode_entry().await?;
entries.push(entry);
}
entries.sort_unstable_by(|a, b| a.path().cmp(b.path()));
Ok(entries)
}
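A hypothetical in-crate call site for the helper above; the sorted order makes listings deterministic and directly comparable to catalog output:

    // Illustration only: list a metadata archive directory in the stable,
    // path-sorted order pxar_metadata_read_dir guarantees.
    async fn print_dir<T: Clone + Send + Sync + pxar::accessor::ReadAt>(
        dir: pxar::accessor::aio::Directory<T>,
    ) -> Result<(), anyhow::Error> {
        for entry in pxar_metadata_read_dir(dir).await? {
            println!("{:?}", entry.path());
        }
        Ok(())
    }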

/// Recursively iterate over pxar archive entries and dump them using the same format used to dump
/// entries from a catalog.
pub async fn pxar_metadata_catalog_dump_dir<T: Clone + Send + Sync + ReadAt>(
parent_dir: Directory<T>,
path_prefix: Option<&str>,
) -> Result<(), Error> {
let entries = pxar_metadata_read_dir(parent_dir).await?;
for entry in entries {
let entry_path = entry_path_with_prefix(&entry, path_prefix);

Box::pin(async move {
if let Ok(attr) = DirEntryAttribute::try_from(&entry) {
let etype = CatalogEntryType::from(&attr);
match attr {
DirEntryAttribute::File { size, mtime } => {
let mut mtime_string = mtime.to_string();
if let Ok(s) = proxmox_time::strftime_local("%FT%TZ", mtime) {
mtime_string = s;
}
info!("{etype} {entry_path:?} {size} {mtime_string}");
}
DirEntryAttribute::Directory { .. } => {
info!("{etype} {entry_path:?}");
let dir = entry.enter_directory().await?;
pxar_metadata_catalog_dump_dir(dir, path_prefix).await?;
}
_ => info!("{etype} {entry_path:?}"),
}
}

Ok::<(), Error>(())
})
.await?;
}
Ok(())
}

/// Recursively iterate over pxar archive entries and call the callback on entries matching the
/// match patterns.
pub async fn pxar_metadata_catalog_find<'future, T: Clone + Send + Sync + ReadAt + 'future>(
parent_dir: Directory<T>,
match_list: &'future (impl pathpatterns::MatchList<'future> + Sync),
callback: &'future (dyn Fn(&[u8]) -> Result<(), Error> + Send + Sync),
) -> Result<(), Error> {
let entries = pxar_metadata_read_dir(parent_dir).await?;
for entry in entries {
Box::pin(async move {
let file_mode = entry.metadata().file_type() as u32;
let entry_path = entry_path_with_prefix(&entry, Some("/"))
.as_os_str()
.to_owned();

match match_list.matches(entry_path.as_bytes(), file_mode) {
Ok(Some(MatchType::Exclude)) => return Ok::<(), Error>(()),
Ok(Some(MatchType::Include)) => callback(entry_path.as_bytes())?,
_ => (),
}

if let EntryKind::Directory = entry.kind() {
let dir_entry = entry.enter_directory().await?;
pxar_metadata_catalog_find(dir_entry, match_list, callback).await?;
}

Ok(())
})
.await?;
}
Ok(())
}
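Both recursive helpers above wrap their loop bodies in Box::pin: an async fn that awaits itself directly would have an infinitely sized future type, so boxing the recursive step gives the future a fixed size. The same trick on a toy function:

    use std::future::Future;
    use std::pin::Pin;

    // A plain fn returning a boxed future may recurse freely, because the
    // recursive call contributes only a pointer-sized allocation.
    fn count_down(n: u32) -> Pin<Box<dyn Future<Output = u32> + Send>> {
        Box::pin(async move {
            if n == 0 {
                0
            } else {
                count_down(n - 1).await + 1
            }
        })
    }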