cargo vendor

Alexander Burmatov 2024-09-06 01:05:13 +03:00
parent e90948a803
commit 0bb97dbe0f
9691 changed files with 3514284 additions and 847974 deletions

pve-rs/Cargo.lock (generated, 1176 lines changed)

File diff suppressed because it is too large.

pve-rs/vendor/addr2line/.cargo-checksum.json (vendored)

@@ -1 +1 @@
{"files":{"CHANGELOG.md":"ef9fa958318e442f1da7d204494cefec75c144aa6d5d5c93b0a5d6fcdf4ef6c6","Cargo.lock":"20b23c454fc3127f08a1bcd2864bbf029793759e6411fba24d44d8f4b7831ad0","Cargo.toml":"d0f15fde73d42bdf00e93f960dff908447225bede9364cb1659e44740a536c04","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e99d88d232bf57d70f0fb87f6b496d44b6653f99f8a63d250a54c61ea4bcde40","README.md":"76d28502bd2e83f6a9e3576bd45e9a7fe5308448c4b5384b0d249515b5f67a5c","bench.plot.r":"6a5d7a4d36ed6b3d9919be703a479bef47698bf947818b483ff03951df2d4e01","benchmark.sh":"b35f89b1ca2c1dc0476cdd07f0284b72d41920d1c7b6054072f50ffba296d78d","coverage.sh":"4677e81922d08a82e83068a911717a247c66af12e559f37b78b6be3337ac9f07","examples/addr2line.rs":"3c5eb5a6726634df6cf53e4d67ee9f90c9ac09838303947f45c3bea1e84548b5","rustfmt.toml":"01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b","src/builtin_split_dwarf_loader.rs":"dc6979de81b35f82e97275e6be27ec61f3c4225ea10574a9e031813e00185174","src/function.rs":"68f047e0c78afe18ad165db255c8254ee74c35cd6df0cc07e400252981f661ed","src/lazy.rs":"0bf23f7098f1902f181e43c2ffa82a3f86df2c0dbcb9bc0ebce6a0168dd8b060","src/lib.rs":"9d6531f71fd138d31cc7596db9ab234198d0895a21ea9cb116434c19ec78b660","tests/correctness.rs":"4081f8019535305e3aa254c6a4e1436272dd873f9717c687ca0e66ea8d5871ed","tests/output_equivalence.rs":"b2cd7c59fa55808a2e66e9fe7f160d846867e3ecefe22c22a818f822c3c41f23","tests/parse.rs":"c2f7362e4679c1b4803b12ec6e8dca6da96aed7273fd210a857524a4182c30e7"},"package":"8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"}
{"files":{"CHANGELOG.md":"ce94cdbac54bd8018cbbb56b19c4d140f1ceb497c6457b7c1a83c1f2866e20d5","Cargo.lock":"d48e85d4c679f6a893ac8045649bd95715640ee432bae12cddbb10d218383277","Cargo.toml":"9f8853132e41d62586629fe1006d4767330ba9d270fe18c2b564a02cbd7610f5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"e99d88d232bf57d70f0fb87f6b496d44b6653f99f8a63d250a54c61ea4bcde40","README.md":"76d28502bd2e83f6a9e3576bd45e9a7fe5308448c4b5384b0d249515b5f67a5c","examples/addr2line.rs":"3c5eb5a6726634df6cf53e4d67ee9f90c9ac09838303947f45c3bea1e84548b5","rustfmt.toml":"01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b","src/builtin_split_dwarf_loader.rs":"b1e7efd9fdb9f494a6e9eb57a71596fc2d9cd6d46e2f237a01c91f8ba8115884","src/function.rs":"68f047e0c78afe18ad165db255c8254ee74c35cd6df0cc07e400252981f661ed","src/lazy.rs":"0bf23f7098f1902f181e43c2ffa82a3f86df2c0dbcb9bc0ebce6a0168dd8b060","src/lib.rs":"541e363ea28ac6705ef591b60792e6b29a5beeeb1822bf711d95bdf40283be50","tests/correctness.rs":"5b765fb8f84bb466baefb990406a50712a1b77c25f2a414cd070e6a77a9437b2","tests/output_equivalence.rs":"b2cd7c59fa55808a2e66e9fe7f160d846867e3ecefe22c22a818f822c3c41f23","tests/parse.rs":"e9bbffbb56de16b2f0bda3b0ab294e3e14816a83c3c1adf74676211ee80c34aa"},"package":"6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678"}

pve-rs/vendor/addr2line/CHANGELOG.md (vendored)

@@ -2,6 +2,14 @@
--------------------------------------------------------------------------------
## 0.22.0 (2024/04/11)
### Breaking changes
* Updated `gimli` and `object` dependencies.
--------------------------------------------------------------------------------
## 0.21.0 (2023/08/12)
### Breaking changes

pve-rs/vendor/addr2line/Cargo.lock (generated, vendored, 433 lines changed)

@@ -4,16 +4,16 @@ version = 3
[[package]]
name = "addr2line"
version = "0.19.0"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
dependencies = [
"gimli 0.27.2",
"gimli 0.28.1",
]
[[package]]
name = "addr2line"
version = "0.21.0"
version = "0.22.0"
dependencies = [
"backtrace",
"clap",
@@ -21,10 +21,10 @@ dependencies = [
"cpp_demangle",
"fallible-iterator",
"findshlibs",
"gimli 0.28.0",
"gimli 0.29.0",
"libtest-mimic",
"memmap2",
"object 0.32.0",
"object 0.35.0",
"rustc-demangle",
"rustc-std-workspace-alloc",
"rustc-std-workspace-core",
@@ -40,91 +40,84 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "anstream"
version = "0.3.2"
version = "0.6.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"is-terminal",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.1"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd"
checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc"
[[package]]
name = "anstyle-parse"
version = "0.2.1"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.0.0"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
dependencies = [
"windows-sys",
"windows-sys 0.52.0",
]
[[package]]
name = "anstyle-wincon"
version = "1.0.2"
version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c"
checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
dependencies = [
"anstyle",
"windows-sys",
"windows-sys 0.52.0",
]
[[package]]
name = "backtrace"
version = "0.3.67"
version = "0.3.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca"
checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d"
dependencies = [
"addr2line 0.19.0",
"addr2line 0.21.0",
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"object 0.30.3",
"object 0.32.2",
"rustc-demangle",
]
[[package]]
name = "bitflags"
version = "1.3.2"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
[[package]]
name = "byteorder"
version = "1.4.3"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cc"
version = "1.0.79"
version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
checksum = "2678b2e3449475e95b0aa6f9b506a28e61b3dc8996592b983695e8ebb58a8b41"
[[package]]
name = "cfg-if"
@@ -134,20 +127,19 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "4.3.21"
version = "4.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd"
checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0"
dependencies = [
"clap_builder",
"clap_derive",
"once_cell",
]
[[package]]
name = "clap_builder"
version = "4.3.21"
version = "4.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa"
checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4"
dependencies = [
"anstream",
"anstyle",
@@ -158,21 +150,21 @@ dependencies = [
[[package]]
name = "clap_derive"
version = "4.3.12"
version = "4.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050"
checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.15",
"syn 2.0.58",
]
[[package]]
name = "clap_lex"
version = "0.5.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
[[package]]
name = "colorchoice"
@@ -182,47 +174,56 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "compiler_builtins"
version = "0.1.91"
version = "0.1.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "571298a3cce7e2afbd3d61abb91a18667d5ab25993ec577a88ee8ac45f00cc3a"
checksum = "f11973008a8cf741fe6d22f339eba21fd0ca81e2760a769ba8243ed6c21edd7e"
[[package]]
name = "cpp_demangle"
version = "0.4.1"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c76f98bdfc7f66172e6c7065f981ebb576ffc903fe4c0561d9f0c2509226dc6"
checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119"
dependencies = [
"cfg-if",
]
[[package]]
name = "crc32fast"
version = "1.3.2"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa"
dependencies = [
"cfg-if",
]
[[package]]
name = "errno"
version = "0.3.2"
name = "derive_more"
version = "0.99.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f"
checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
dependencies = [
"errno-dragonfly",
"libc",
"windows-sys",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "errno-dragonfly"
version = "0.1.2"
name = "errno"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
dependencies = [
"cc",
"libc",
"windows-sys 0.52.0",
]
[[package]]
name = "escape8259"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba4f4911e3666fcd7826997b4745c8224295a6f3072f1418c3067b97a67557ee"
dependencies = [
"rustversion",
]
[[package]]
@@ -245,9 +246,9 @@ dependencies = [
[[package]]
name = "flate2"
version = "1.0.25"
version = "1.0.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e"
dependencies = [
"crc32fast",
"miniz_oxide",
@@ -255,15 +256,15 @@ dependencies = [
[[package]]
name = "gimli"
version = "0.27.2"
version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "gimli"
version = "0.28.0"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd"
dependencies = [
"compiler_builtins",
"fallible-iterator",
@@ -274,46 +275,15 @@ dependencies = [
[[package]]
name = "heck"
version = "0.4.1"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermit-abi"
version = "0.2.6"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
dependencies = [
"libc",
]
[[package]]
name = "hermit-abi"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
[[package]]
name = "io-lifetimes"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [
"hermit-abi 0.3.2",
"libc",
"windows-sys",
]
[[package]]
name = "is-terminal"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
"hermit-abi 0.3.2",
"rustix 0.38.8",
"windows-sys",
]
checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
[[package]]
name = "lazy_static"
@@ -323,116 +293,105 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.147"
version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "libtest-mimic"
version = "0.6.1"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7"
checksum = "fefdf21230d6143476a28adbee3d930e2b68a3d56443c777cae3fe9340eebff9"
dependencies = [
"clap",
"escape8259",
"termcolor",
"threadpool",
]
[[package]]
name = "linux-raw-sys"
version = "0.3.8"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]]
name = "linux-raw-sys"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503"
checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
[[package]]
name = "memchr"
version = "2.5.0"
version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d"
[[package]]
name = "memmap2"
version = "0.5.10"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327"
checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322"
dependencies = [
"libc",
]
[[package]]
name = "miniz_oxide"
version = "0.6.2"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7"
dependencies = [
"adler",
]
[[package]]
name = "num_cpus"
version = "1.15.0"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
dependencies = [
"hermit-abi 0.2.6",
"hermit-abi",
"libc",
]
[[package]]
name = "object"
version = "0.30.3"
version = "0.32.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439"
checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441"
dependencies = [
"memchr",
]
[[package]]
name = "object"
version = "0.32.0"
version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe"
checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e"
dependencies = [
"flate2",
"memchr",
"ruzstd",
]
[[package]]
name = "once_cell"
version = "1.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
[[package]]
name = "proc-macro2"
version = "1.0.56"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435"
checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.26"
version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rustc-demangle"
version = "0.1.22"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustc-std-workspace-alloc"
@@ -448,47 +407,39 @@ checksum = "1956f5517128a2b6f23ab2dadf1a976f4f5b27962e7724c2bf3d45e539ec098c"
[[package]]
name = "rustix"
version = "0.37.23"
version = "0.38.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06"
checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89"
dependencies = [
"bitflags 1.3.2",
"bitflags",
"errno",
"io-lifetimes",
"libc",
"linux-raw-sys 0.3.8",
"windows-sys",
"linux-raw-sys",
"windows-sys 0.52.0",
]
[[package]]
name = "rustix"
version = "0.38.8"
name = "rustversion"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f"
dependencies = [
"bitflags 2.4.0",
"errno",
"libc",
"linux-raw-sys 0.4.5",
"windows-sys",
]
checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47"
[[package]]
name = "ruzstd"
version = "0.4.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3ffab8f9715a0d455df4bbb9d21e91135aab3cd3ca187af0cd0c3c3f868fdc"
checksum = "5174a470eeb535a721ae9fdd6e291c2411a906b96592182d05217591d5c5cf7b"
dependencies = [
"byteorder",
"thiserror-core",
"derive_more",
"twox-hash",
]
[[package]]
name = "smallvec"
version = "1.10.0"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "stable_deref_trait"
@@ -504,9 +455,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "strsim"
version = "0.10.0"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "syn"
@@ -521,9 +472,9 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.15"
version = "2.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822"
checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687"
dependencies = [
"proc-macro2",
"quote",
@@ -532,41 +483,21 @@ dependencies = [
[[package]]
name = "termcolor"
version = "1.2.0"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
dependencies = [
"winapi-util",
]
[[package]]
name = "terminal_size"
version = "0.2.6"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7"
dependencies = [
"rustix 0.37.23",
"windows-sys",
]
[[package]]
name = "thiserror-core"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d97345f6437bb2004cd58819d8a9ef8e36cdd7661c2abc4bbde0a7c40d9f497"
dependencies = [
"thiserror-core-impl",
]
[[package]]
name = "thiserror-core-impl"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
"rustix",
"windows-sys 0.48.0",
]
[[package]]
@@ -596,9 +527,9 @@ checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a"
[[package]]
name = "unicode-ident"
version = "1.0.8"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "utf8parse"
@@ -624,9 +555,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
dependencies = [
"winapi",
]
@@ -643,62 +574,128 @@ version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
"windows-targets",
"windows-targets 0.48.5",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.4",
]
[[package]]
name = "windows-targets"
version = "0.48.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
"windows_aarch64_gnullvm 0.48.5",
"windows_aarch64_msvc 0.48.5",
"windows_i686_gnu 0.48.5",
"windows_i686_msvc 0.48.5",
"windows_x86_64_gnu 0.48.5",
"windows_x86_64_gnullvm 0.48.5",
"windows_x86_64_msvc 0.48.5",
]
[[package]]
name = "windows-targets"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b"
dependencies = [
"windows_aarch64_gnullvm 0.52.4",
"windows_aarch64_msvc 0.52.4",
"windows_i686_gnu 0.52.4",
"windows_i686_msvc 0.52.4",
"windows_x86_64_gnu 0.52.4",
"windows_x86_64_gnullvm 0.52.4",
"windows_x86_64_msvc 0.52.4",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.0"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.0"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675"
[[package]]
name = "windows_i686_gnu"
version = "0.48.0"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_gnu"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3"
[[package]]
name = "windows_i686_msvc"
version = "0.48.0"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_i686_msvc"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.0"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.0"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.0"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"

pve-rs/vendor/addr2line/Cargo.toml (vendored)

@@ -13,11 +13,13 @@
edition = "2018"
rust-version = "1.65"
name = "addr2line"
version = "0.21.0"
version = "0.22.0"
exclude = [
"/benches/*",
"/fixtures/*",
".github",
"*.sh",
"*.r",
]
description = "A cross-platform symbolication library written in Rust, using `gimli`"
documentation = "https://docs.rs/addr2line"
@@ -35,10 +37,10 @@ repository = "https://github.com/gimli-rs/addr2line"
[profile.bench]
codegen-units = 1
debug = true
debug = 2
[profile.release]
debug = true
debug = 2
[[example]]
name = "addr2line"
@@ -83,16 +85,16 @@ optional = true
default-features = false
[dependencies.gimli]
version = "0.28.0"
version = "0.29.0"
features = ["read"]
default-features = false
[dependencies.memmap2]
version = "0.5.5"
version = "0.9.4"
optional = true
[dependencies.object]
version = "0.32.0"
version = "0.35.0"
features = ["read"]
optional = true
default-features = false
@@ -117,7 +119,7 @@ features = ["wrap_help"]
version = "0.10"
[dev-dependencies.libtest-mimic]
version = "0.6.1"
version = "0.7.2"
[dev-dependencies.typed-arena]
version = "2"

pve-rs/vendor/addr2line/bench.plot.r (vendored, deleted)

@@ -1,23 +0,0 @@
v <- read.table(file("stdin"))
t <- data.frame(prog=v[,1], funcs=(v[,2]=="func"), time=v[,3], mem=v[,4], stringsAsFactors=FALSE)
t$prog <- as.character(t$prog)
t$prog[t$prog == "master"] <- "gimli-rs/addr2line"
t$funcs[t$funcs == TRUE] <- "With functions"
t$funcs[t$funcs == FALSE] <- "File/line only"
t$mem = t$mem / 1024.0
library(ggplot2)
p <- ggplot(data=t, aes(x=prog, y=time, fill=prog))
p <- p + geom_bar(stat = "identity")
p <- p + facet_wrap(~ funcs)
p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
p <- p + ylab("time (s)") + ggtitle("addr2line runtime")
ggsave('time.png',plot=p,width=10,height=6)
p <- ggplot(data=t, aes(x=prog, y=mem, fill=prog))
p <- p + geom_bar(stat = "identity")
p <- p + facet_wrap(~ funcs)
p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
p <- p + ylab("memory (kB)") + ggtitle("addr2line memory usage")
ggsave('memory.png',plot=p,width=10,height=6)

pve-rs/vendor/addr2line/benchmark.sh (vendored, deleted)

@@ -1,112 +0,0 @@
#!/bin/bash
if [[ $# -le 1 ]]; then
echo "Usage: $0 <executable> [<addresses>] REFS..."
exit 1
fi
target="$1"
shift
addresses=""
if [[ -e "$1" ]]; then
addresses="$1"
shift
fi
# path to "us"
# readlink -f, but more portable:
dirname=$(perl -e 'use Cwd "abs_path";print abs_path(shift)' "$(dirname "$0")")
# https://stackoverflow.com/a/2358432/472927
{
# compile all refs
pushd "$dirname" > /dev/null
# if the user has some local changes, preserve them
nstashed=$(git stash list | wc -l)
echo "==> Stashing any local modifications"
git stash --keep-index > /dev/null
popstash() {
# https://stackoverflow.com/q/24520791/472927
if [[ "$(git stash list | wc -l)" -ne "$nstashed" ]]; then
echo "==> Restoring stashed state"
git stash pop > /dev/null
fi
}
# if the user has added stuff to the index, abort
if ! git diff-index --quiet HEAD --; then
echo "Refusing to overwrite outstanding git changes"
popstash
exit 2
fi
current=$(git symbolic-ref --short HEAD)
for ref in "$@"; do
echo "==> Compiling $ref"
git checkout -q "$ref"
commit=$(git rev-parse HEAD)
fn="target/release/addr2line-$commit"
if [[ ! -e "$fn" ]]; then
cargo build --release --example addr2line
cp target/release/examples/addr2line "$fn"
fi
if [[ "$ref" != "$commit" ]]; then
ln -sfn "addr2line-$commit" target/release/addr2line-"$ref"
fi
done
git checkout -q "$current"
popstash
popd > /dev/null
# get us some addresses to look up
if [[ -z "$addresses" ]]; then
echo "==> Looking for benchmarking addresses (this may take a while)"
addresses=$(mktemp tmp.XXXXXXXXXX)
objdump -C -x --disassemble -l "$target" \
| grep -P '0[048]:' \
| awk '{print $1}' \
| sed 's/:$//' \
> "$addresses"
echo " -> Addresses stored in $addresses; you should re-use it next time"
fi
run() {
func="$1"
name="$2"
cmd="$3"
args="$4"
printf "%s\t%s\t" "$name" "$func"
if [[ "$cmd" =~ llvm-symbolizer ]]; then
/usr/bin/time -f '%e\t%M' "$cmd" $args -obj="$target" < "$addresses" 2>&1 >/dev/null
else
/usr/bin/time -f '%e\t%M' "$cmd" $args -e "$target" < "$addresses" 2>&1 >/dev/null
fi
}
# run without functions
log1=$(mktemp tmp.XXXXXXXXXX)
echo "==> Benchmarking"
run nofunc binutils addr2line >> "$log1"
#run nofunc elfutils eu-addr2line >> "$log1"
run nofunc llvm-sym llvm-symbolizer -functions=none >> "$log1"
for ref in "$@"; do
run nofunc "$ref" "$dirname/target/release/addr2line-$ref" >> "$log1"
done
cat "$log1" | column -t
# run with functions
log2=$(mktemp tmp.XXXXXXXXXX)
echo "==> Benchmarking with -f"
run func binutils addr2line "-f -i" >> "$log2"
#run func elfutils eu-addr2line "-f -i" >> "$log2"
run func llvm-sym llvm-symbolizer "-functions=linkage -demangle=0" >> "$log2"
for ref in "$@"; do
run func "$ref" "$dirname/target/release/addr2line-$ref" "-f -i" >> "$log2"
done
cat "$log2" | column -t
cat "$log2" >> "$log1"; rm "$log2"
echo "==> Plotting"
Rscript --no-readline --no-restore --no-save "$dirname/bench.plot.r" < "$log1"
echo "==> Cleaning up"
rm "$log1"
exit 0
}

pve-rs/vendor/addr2line/coverage.sh (vendored, deleted)

@@ -1,5 +0,0 @@
#!/bin/sh
# Run tarpaulin and pycobertura to generate coverage.html.
cargo tarpaulin --skip-clean --out Xml
pycobertura show --format html --output coverage.html cobertura.xml

pve-rs/vendor/addr2line/src/builtin_split_dwarf_loader.rs (vendored)

@@ -27,14 +27,14 @@ fn convert_path<R: gimli::Reader<Endian = gimli::RunTimeEndian>>(
Ok(PathBuf::from(s))
}
fn load_section<'data: 'file, 'file, O, R, F>(
fn load_section<'data, O, R, F>(
id: gimli::SectionId,
file: &'file O,
file: &O,
endian: R::Endian,
loader: &mut F,
) -> Result<R, gimli::Error>
where
O: object::Object<'data, 'file>,
O: object::Object<'data>,
R: gimli::Reader<Endian = gimli::RunTimeEndian>,
F: FnMut(Cow<'data, [u8]>, R::Endian) -> R,
{

pve-rs/vendor/addr2line/src/lib.rs (vendored)

@@ -203,9 +203,9 @@ impl Context<gimli::EndianRcSlice<gimli::RunTimeEndian>> {
/// Performance sensitive applications may want to use `Context::from_dwarf`
/// with a more specialised `gimli::Reader` implementation.
#[inline]
pub fn new<'data: 'file, 'file, O: object::Object<'data, 'file>>(
file: &'file O,
) -> Result<Self, Error> {
pub fn new<'data, O: object::Object<'data>>(file: &O) -> Result<Self, Error> {
Self::new_with_sup(file, None)
}
@@ -219,9 +217,9 @@ impl Context<gimli::EndianRcSlice<gimli::RunTimeEndian>> {
///
/// Performance sensitive applications may want to use `Context::from_dwarf`
/// with a more specialised `gimli::Reader` implementation.
pub fn new_with_sup<'data: 'file, 'file, O: object::Object<'data, 'file>>(
file: &'file O,
sup_file: Option<&'file O>,
pub fn new_with_sup<'data, O: object::Object<'data>>(
file: &O,
sup_file: Option<&O>,
) -> Result<Self, Error> {
let endian = if file.is_little_endian() {
gimli::RunTimeEndian::Little
@@ -229,13 +227,13 @@ impl Context<gimli::EndianRcSlice<gimli::RunTimeEndian>> {
gimli::RunTimeEndian::Big
};
fn load_section<'data: 'file, 'file, O, Endian>(
fn load_section<'data, O, Endian>(
id: gimli::SectionId,
file: &'file O,
file: &O,
endian: Endian,
) -> Result<gimli::EndianRcSlice<Endian>, Error>
where
O: object::Object<'data, 'file>,
O: object::Object<'data>,
Endian: gimli::Endianity,
{
use object::ObjectSection;
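
The hunks above drop the extra `'file` lifetime from the `object::Object` bound, which also changes how callers construct a `Context`. As a rough sketch only (not taken from the diff; the binary path and probe address are placeholders), resolving an address with the addr2line 0.22 / object 0.35 pair pinned by this vendor update could look like:

```rust
use std::{error::Error, fs};

fn main() -> Result<(), Box<dyn Error>> {
    // Parse an executable image; `object::File` now only carries the `'data` lifetime.
    let data = fs::read("/proc/self/exe")?;
    let file = object::File::parse(&*data)?;

    // `Context::new` now takes `&O` where `O: object::Object<'data>` (no `'file`).
    let ctx = addr2line::Context::new(&file)?;

    // Resolve a placeholder virtual address to file/line, if known.
    if let Some(loc) = ctx.find_location(0x1000)? {
        println!("{}:{}", loc.file.unwrap_or("?"), loc.line.unwrap_or(0));
    }
    Ok(())
}
```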

pve-rs/vendor/addr2line/tests/correctness.rs (vendored)

@@ -44,13 +44,13 @@ fn correctness() {
gimli::RunTimeEndian::Big
};
fn load_section<'data: 'file, 'file, O, Endian>(
fn load_section<'data, O, Endian>(
id: gimli::SectionId,
file: &'file O,
file: &O,
endian: Endian,
) -> Result<gimli::EndianArcSlice<Endian>, gimli::Error>
where
O: object::Object<'data, 'file>,
O: object::Object<'data>,
Endian: gimli::Endianity,
{
use object::ObjectSection;

pve-rs/vendor/addr2line/tests/parse.rs (vendored)

@@ -26,7 +26,7 @@ fn with_file<F: FnOnce(&object::File<'_>)>(target: &path::Path, f: F) {
f(&file)
}
fn dwarf_load<'a>(object: &object::File<'a>) -> gimli::Dwarf<Cow<'a, [u8]>> {
fn dwarf_load<'a>(object: &object::File<'a>) -> gimli::DwarfSections<Cow<'a, [u8]>> {
let load_section = |id: gimli::SectionId| -> Result<Cow<'a, [u8]>, gimli::Error> {
use object::ObjectSection;
@@ -36,11 +36,11 @@ fn dwarf_load<'a>(object: &object::File<'a>) -> gimli::Dwarf<Cow<'a, [u8]>> {
.unwrap_or(&[][..]);
Ok(Cow::Borrowed(data))
};
gimli::Dwarf::load(&load_section).unwrap()
gimli::DwarfSections::load(&load_section).unwrap()
}
fn dwarf_borrow<'a>(
dwarf: &'a gimli::Dwarf<Cow<'_, [u8]>>,
dwarf: &'a gimli::DwarfSections<Cow<'_, [u8]>>,
) -> gimli::Dwarf<gimli::EndianSlice<'a, gimli::LittleEndian>> {
let borrow_section: &dyn for<'b> Fn(
&'b Cow<'_, [u8]>,

pve-rs/vendor/adler2/.cargo-checksum.json (vendored, new file)

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"52435caf085b428cdb6171a34f4980f52aaaf541a3dced226c92eb82f69a48a7","Cargo.toml":"56b9cca6450964cbe772b6519bc048c2f56cc80e9261de1126d789c5e1951136","LICENSE-0BSD":"861399f8c21c042b110517e76dc6b63a2b334276c8cf17412fc3c8908ca8dc17","LICENSE-APACHE":"8ada45cd9f843acf64e4722ae262c622a2b3b3007c7310ef36ac1061a30f6adb","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"cd955d5d6a49161e6f7a04df4a5963581b66ed43fd5096b2dedca8e295efe4f9","RELEASE_PROCESS.md":"a86cd10fc70f167f8d00e9e4ce0c6b4ebdfa1865058390dffd1e0ad4d3e68d9d","benches/bench.rs":"d67bef1c7f36ed300a8fbcf9d50b9dfdead1fd340bf87a4d47d99a0c1c042c04","src/algo.rs":"932c2bc591d13fe4470185125617b5aaa660a3898f23b553acc85df0bf49dded","src/lib.rs":"4acd41668fe30daffa37084e7e223f268957b816afc1864ffb3f5d6d7adf0890"},"package":"512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"}

pve-rs/vendor/adler2/CHANGELOG.md (vendored, new file, 77 lines)

@@ -0,0 +1,77 @@
# Changelog
All notable changes to this project will be documented in this file.
---
## [2.0.0](https://github.com/Frommi/miniz_oxide/compare/1.0.2..2.0.0) - 2024-08-04
First release of adler2 - fork of adler crate as the original is unmaintained and archived
##### Changes since last version of Adler:
### Bug Fixes
- **(core)** change to rust 2021 edition, update repository info and links, update author info - ([867b115](https://github.com/Frommi/miniz_oxide/commit/867b115bad79bf62098f2acccc81bf53ec5a125d)) - oyvindln
- **(core)** simplify some code and fix benches - ([128fb9c](https://github.com/Frommi/miniz_oxide/commit/128fb9cb6cad5c3a54fb0b6c68549d80b79a1fe0)) - oyvindln
### Changelog of original adler crate
---
## [1.0.2 - 2021-02-26](https://github.com/jonas-schievink/adler/releases/tag/v1.0.2)
- Fix doctest on big-endian systems ([#9]).
[#9]: https://github.com/jonas-schievink/adler/pull/9
## [1.0.1 - 2020-11-08](https://github.com/jonas-schievink/adler/releases/tag/v1.0.1)
### Fixes
- Fix documentation on docs.rs.
## [1.0.0 - 2020-11-08](https://github.com/jonas-schievink/adler/releases/tag/v1.0.0)
### Fixes
- Fix `cargo test --no-default-features` ([#5]).
### Improvements
- Extended and clarified documentation.
- Added more rustdoc examples.
- Extended CI to test the crate with `--no-default-features`.
### Breaking Changes
- `adler32_reader` now takes its generic argument by value instead of as a `&mut`.
- Renamed `adler32_reader` to `adler32`.
## [0.2.3 - 2020-07-11](https://github.com/jonas-schievink/adler/releases/tag/v0.2.3)
- Process 4 Bytes at a time, improving performance by up to 50% ([#2]).
## [0.2.2 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.2)
- Bump MSRV to 1.31.0.
## [0.2.1 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.1)
- Add a few `#[inline]` annotations to small functions.
- Fix CI badge.
- Allow integration into libstd.
## [0.2.0 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.0)
- Support `#![no_std]` when using `default-features = false`.
- Improve performance by around 7x.
- Support Rust 1.8.0.
- Improve API naming.
## [0.1.0 - 2020-06-26](https://github.com/jonas-schievink/adler/releases/tag/v0.1.0)
Initial release.
[#2]: https://github.com/jonas-schievink/adler/pull/2
[#5]: https://github.com/jonas-schievink/adler/pull/5

pve-rs/vendor/adler2/Cargo.toml (vendored, new file, 97 lines)

@@ -0,0 +1,97 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "adler2"
version = "2.0.0"
authors = [
"Jonas Schievink <jonasschievink@gmail.com>",
"oyvindln <oyvindln@users.noreply.github.com>",
]
build = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "A simple clean-room implementation of the Adler-32 checksum"
documentation = "https://docs.rs/adler2/"
readme = "README.md"
keywords = [
"checksum",
"integrity",
"hash",
"adler32",
"zlib",
]
categories = ["algorithms"]
license = "0BSD OR MIT OR Apache-2.0"
repository = "https://github.com/oyvindln/adler2"
[package.metadata.docs.rs]
rustdoc-args = ["--cfg=docsrs"]
[package.metadata.release]
no-dev-version = true
pre-release-commit-message = "Release {{version}}"
tag-message = "{{version}}"
[[package.metadata.release.pre-release-replacements]]
file = "CHANGELOG.md"
replace = """
## Unreleased
No changes.
## [{{version}} - {{date}}](https://github.com/jonas-schievink/adler/releases/tag/v{{version}})
"""
search = """
## Unreleased
"""
[[package.metadata.release.pre-release-replacements]]
file = "README.md"
replace = 'adler = "{{version}}"'
search = 'adler = "[a-z0-9\\.-]+"'
[[package.metadata.release.pre-release-replacements]]
file = "src/lib.rs"
replace = "https://docs.rs/adler/{{version}}"
search = 'https://docs.rs/adler/[a-z0-9\.-]+'
[lib]
name = "adler2"
path = "src/lib.rs"
[[bench]]
name = "bench"
path = "benches/bench.rs"
harness = false
[dependencies.compiler_builtins]
version = "0.1.2"
optional = true
[dependencies.core]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
[dev-dependencies.criterion]
version = "0.3.2"
[features]
default = ["std"]
rustc-dep-of-std = [
"core",
"compiler_builtins",
]
std = []

pve-rs/vendor/adler2/LICENSE-0BSD (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
Copyright (C) Jonas Schievink <jonasschievink@gmail.com>
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

pve-rs/vendor/adler2/LICENSE-APACHE (vendored, new file, 201 lines)

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/LICENSE-2.0
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

pve-rs/vendor/adler2/LICENSE-MIT (vendored, new file, 23 lines)

@@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

pve-rs/vendor/adler2/README.md (vendored, new file, 46 lines)

@@ -0,0 +1,46 @@
# Adler-32 checksums for Rust
This is a fork of the adler crate, as the [original](https://github.com/jonas-schievink/adler) has been archived and is no longer updated by its author
[![crates.io](https://img.shields.io/crates/v/adler.svg)](https://crates.io/crates/adler)
[![docs.rs](https://docs.rs/adler/badge.svg)](https://docs.rs/adler/)
![CI](https://github.com/jonas-schievink/adler/workflows/CI/badge.svg)
This crate provides a simple implementation of the Adler-32 checksum, used in
the zlib compression format.
Please refer to the [changelog](CHANGELOG.md) to see what changed in the last
releases.
## Features
- Permissively licensed (0BSD) clean-room implementation.
- Zero dependencies.
- Zero `unsafe`.
- Decent performance (3-4 GB/s) (see note).
- Supports `#![no_std]` (with `default-features = false`).
## Usage
Add an entry to your `Cargo.toml`:
```toml
[dependencies]
adler2 = "2.0.0"
```
Check the [API Documentation](https://docs.rs/adler/) for how to use the
crate's functionality.
## Rust version support
Currently, this crate supports all Rust versions starting at Rust 1.56.0.
Bumping the Minimum Supported Rust Version (MSRV) is *not* considered a breaking
change, but will not be done without good reasons. The latest 3 stable Rust
versions will always be supported no matter what.
## Performance
Due to the way the algorithm works, and the fact that it's not currently possible to use explicit SIMD in safe Rust, this crate benefits drastically from being compiled with newer CPU instructions enabled (using e.g. ```RUSTFLAGS="-C target-feature=+sse4.1"``` or ```-C target-cpu=x86-64-v2```/```-C target-cpu=x86-64-v3``` arguments, depending on what CPU support is being targeted).
Judging by the crate benchmarks on a Ryzen 5600, compiling with SSE 4.1 enabled (part of the x86-64-v2 feature level) can give a ~50-150% speedup, and enabling the LZCNT instruction (part of the x86-64-v3 feature level) can give a further ~50% speedup.

pve-rs/vendor/adler2/RELEASE_PROCESS.md (vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
# What to do to publish a new release
1. Ensure all notable changes are in the changelog under "Unreleased".
2. Execute `cargo release <level>` to bump version(s), tag and publish
everything. External subcommand, must be installed with `cargo install
cargo-release`.
`<level>` can be one of `major|minor|patch`. If this is the first release
(`0.1.0`), use `minor`, since the version starts out as `0.0.0`.
3. Go to the GitHub releases, edit the just-pushed tag. Copy the release notes
from the changelog.

pve-rs/vendor/adler2/benches/bench.rs (vendored, new file, 109 lines)

@@ -0,0 +1,109 @@
extern crate adler2;
extern crate criterion;
use adler2::{adler32_slice, Adler32};
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
fn simple(c: &mut Criterion) {
{
const SIZE: usize = 100;
let mut group = c.benchmark_group("simple-100b");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("zeroes-100", |bencher| {
bencher.iter(|| {
adler32_slice(&[0; SIZE]);
});
});
group.bench_function("ones-100", |bencher| {
bencher.iter(|| {
adler32_slice(&[0xff; SIZE]);
});
});
}
{
const SIZE: usize = 1024;
let mut group = c.benchmark_group("simple-1k");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("zeroes-1k", |bencher| {
bencher.iter(|| {
adler32_slice(&[0; SIZE]);
});
});
group.bench_function("ones-1k", |bencher| {
bencher.iter(|| {
adler32_slice(&[0xff; SIZE]);
});
});
}
{
const SIZE: usize = 1024 * 1024;
let mut group = c.benchmark_group("simple-1m");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("zeroes-1m", |bencher| {
bencher.iter(|| {
adler32_slice(&[0; SIZE]);
});
});
group.bench_function("ones-1m", |bencher| {
bencher.iter(|| {
adler32_slice(&[0xff; SIZE]);
});
});
}
}
fn chunked(c: &mut Criterion) {
const SIZE: usize = 16 * 1024 * 1024;
let data = vec![0xAB; SIZE];
let mut group = c.benchmark_group("chunked-16m");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("5552", |bencher| {
bencher.iter(|| {
let mut h = Adler32::new();
for chunk in data.chunks(5552) {
h.write_slice(chunk);
}
h.checksum()
});
});
group.bench_function("8k", |bencher| {
bencher.iter(|| {
let mut h = Adler32::new();
for chunk in data.chunks(8 * 1024) {
h.write_slice(chunk);
}
h.checksum()
});
});
group.bench_function("64k", |bencher| {
bencher.iter(|| {
let mut h = Adler32::new();
for chunk in data.chunks(64 * 1024) {
h.write_slice(chunk);
}
h.checksum()
});
});
group.bench_function("1m", |bencher| {
bencher.iter(|| {
let mut h = Adler32::new();
for chunk in data.chunks(1024 * 1024) {
h.write_slice(chunk);
}
h.checksum()
});
});
}
criterion_group!(benches, simple, chunked);
criterion_main!(benches);
pve-rs/vendor/adler2/src/algo.rs vendored Normal file
@ -0,0 +1,155 @@
use crate::Adler32;
use std::ops::{AddAssign, MulAssign, RemAssign};
impl Adler32 {
pub(crate) fn compute(&mut self, bytes: &[u8]) {
// The basic algorithm is, for every byte:
// a = (a + byte) % MOD
// b = (b + a) % MOD
// where MOD = 65521.
//
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
// - We use 32-bit arithmetic in this function.
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
// operation.
//
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
// b = n×D1 + (n-1)×D2 + (n-2)×D3 + ... + Dn + n*1 (mod 65521)
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
// for the previous values of a and b, as well as treat every input Byte as being 255:
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
// Or in other words:
// b_inc = n*65520 + n(n+1)/2*255
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
// 2^32-65521 = n*65520 + n(n+1)/2*255
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
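// (Concretely, the bound n*65520 + n*(n+1)/2*255 <= 2^32 - 65521 is the quadratic
// 127.5*n^2 + 65647.5*n - 4294901775 <= 0, whose positive root is n ≈ 5552.18, hence 5552.)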
//
// On top of the optimization outlined above, the algorithm can also be parallelized with a
// bit more work:
//
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
//
// If we fix some value k<N and rewrite indices 1, ..., N as
//
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
//
// then we can express a and b in terms of sums of smaller sequences kb and ka:
//
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
//
// a = ka(1) + ka(2) + ... + ka(k) + 1
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
//
// We use this insight to unroll the main loop and process k=4 bytes at a time.
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
// stem from increased pipeline parallelism rather than auto-vectorization.
//
// This technique is described in-depth here:
// https://software.intel.com/content/www/us/en/develop/articles/fast-computation-of-fletcher-checksums.html
const MOD: u32 = 65521;
const CHUNK_SIZE: usize = 5552 * 4;
let mut a = u32::from(self.a);
let mut b = u32::from(self.b);
let mut a_vec = U32X4([0; 4]);
let mut b_vec = a_vec;
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
// iterate over 4 bytes at a time
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
let remainder_chunk = chunk_iter.remainder();
for chunk in chunk_iter {
for byte_vec in chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += CHUNK_SIZE as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
}
// special-case the final chunk because it may be shorter than the rest
for byte_vec in remainder_chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += remainder_chunk.len() as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
// combine the sub-sum results into the main sum
b_vec *= 4;
b_vec.0[1] += MOD - a_vec.0[1];
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
for &av in a_vec.0.iter() {
a += av;
}
for &bv in b_vec.0.iter() {
b += bv;
}
// iterate over the remaining few bytes in serial
for &byte in remainder.iter() {
a += u32::from(byte);
b += a;
}
self.a = (a % MOD) as u16;
self.b = (b % MOD) as u16;
}
}
#[derive(Copy, Clone)]
struct U32X4([u32; 4]);
impl U32X4 {
#[inline]
fn from(bytes: &[u8]) -> Self {
U32X4([
u32::from(bytes[0]),
u32::from(bytes[1]),
u32::from(bytes[2]),
u32::from(bytes[3]),
])
}
}
impl AddAssign<Self> for U32X4 {
#[inline]
fn add_assign(&mut self, other: Self) {
// Implement this in a primitive manner to help out the compiler a bit.
self.0[0] += other.0[0];
self.0[1] += other.0[1];
self.0[2] += other.0[2];
self.0[3] += other.0[3];
}
}
impl RemAssign<u32> for U32X4 {
#[inline]
fn rem_assign(&mut self, quotient: u32) {
self.0[0] %= quotient;
self.0[1] %= quotient;
self.0[2] %= quotient;
self.0[3] %= quotient;
}
}
impl MulAssign<u32> for U32X4 {
#[inline]
fn mul_assign(&mut self, rhs: u32) {
self.0[0] *= rhs;
self.0[1] *= rhs;
self.0[2] *= rhs;
self.0[3] *= rhs;
}
}
pve-rs/vendor/adler2/src/lib.rs vendored Normal file
@ -0,0 +1,287 @@
//! Adler-32 checksum implementation.
//!
//! This implementation features:
//!
//! - Permissively licensed (0BSD) clean-room implementation.
//! - Zero dependencies.
//! - Zero `unsafe`.
//! - Decent performance (3-4 GB/s).
//! - `#![no_std]` support (with `default-features = false`).
#![doc(html_root_url = "https://docs.rs/adler2/2.0.0")]
// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default
#![doc(test(attr(deny(unused_imports, unused_must_use))))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_debug_implementations)]
#![forbid(unsafe_code)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
extern crate core as std;
mod algo;
use std::hash::Hasher;
#[cfg(feature = "std")]
use std::io::{self, BufRead};
/// Adler-32 checksum calculator.
///
/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default
/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via
/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`].
///
/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any
/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although
/// that is not recommended (while every checksum is a hash function, they are not necessarily a
/// good one).
///
/// # Examples
///
/// Basic, piecewise checksum calculation:
///
/// ```
/// use adler2::Adler32;
///
/// let mut adler = Adler32::new();
///
/// adler.write_slice(&[0, 1, 2]);
/// adler.write_slice(&[3, 4, 5]);
///
/// assert_eq!(adler.checksum(), 0x00290010);
/// ```
///
/// Using `Hash` to process structures:
///
/// ```
/// use std::hash::Hash;
/// use adler2::Adler32;
///
/// #[derive(Hash)]
/// struct Data {
/// byte: u8,
/// word: u16,
/// big: u64,
/// }
///
/// let mut adler = Adler32::new();
///
/// let data = Data { byte: 0x1F, word: 0xABCD, big: !0 };
/// data.hash(&mut adler);
///
/// // hash value depends on architecture endianness
/// if cfg!(target_endian = "little") {
/// assert_eq!(adler.checksum(), 0x33410990);
/// }
/// if cfg!(target_endian = "big") {
/// assert_eq!(adler.checksum(), 0x331F0990);
/// }
///
/// ```
///
/// [`new`]: #method.new
/// [`from_checksum`]: #method.from_checksum
/// [`checksum`]: #method.checksum
#[derive(Debug, Copy, Clone)]
pub struct Adler32 {
a: u16,
b: u16,
}
impl Adler32 {
/// Creates a new Adler-32 instance with default state.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
///
/// This allows resuming checksum calculation without having to keep the `Adler32` instance
/// around.
///
/// # Example
///
/// ```
/// # use adler2::Adler32;
/// let parts = [
/// "rust",
/// "acean",
/// ];
/// let whole = adler2::adler32_slice(b"rustacean");
///
/// let mut sum = Adler32::new();
/// sum.write_slice(parts[0].as_bytes());
/// let partial = sum.checksum();
///
/// // ...later
///
/// let mut sum = Adler32::from_checksum(partial);
/// sum.write_slice(parts[1].as_bytes());
/// assert_eq!(sum.checksum(), whole);
/// ```
#[inline]
pub const fn from_checksum(sum: u32) -> Self {
Adler32 {
a: sum as u16,
b: (sum >> 16) as u16,
}
}
/// Returns the calculated checksum at this point in time.
#[inline]
pub fn checksum(&self) -> u32 {
(u32::from(self.b) << 16) | u32::from(self.a)
}
/// Adds `bytes` to the checksum calculation.
///
/// If efficiency matters, this should be called with Byte slices that contain at least a few
/// thousand Bytes.
pub fn write_slice(&mut self, bytes: &[u8]) {
self.compute(bytes);
}
}
impl Default for Adler32 {
#[inline]
fn default() -> Self {
Adler32 { a: 1, b: 0 }
}
}
impl Hasher for Adler32 {
#[inline]
fn finish(&self) -> u64 {
u64::from(self.checksum())
}
fn write(&mut self, bytes: &[u8]) {
self.write_slice(bytes);
}
}
/// Calculates the Adler-32 checksum of a byte slice.
///
/// This is a convenience function around the [`Adler32`] type.
///
/// [`Adler32`]: struct.Adler32.html
pub fn adler32_slice(data: &[u8]) -> u32 {
let mut h = Adler32::new();
h.write_slice(data);
h.checksum()
}
/// Calculates the Adler-32 checksum of a `BufRead`'s contents.
///
/// The passed `BufRead` implementor will be read until it reaches EOF (or until it reports an
/// error).
///
/// If you only have a `Read` implementor, you can wrap it in `std::io::BufReader` before calling
/// this function.
///
/// # Errors
///
/// Any error returned by the reader is bubbled up by this function.
///
/// # Examples
///
/// ```no_run
/// # fn run() -> Result<(), Box<dyn std::error::Error>> {
/// use adler2::adler32;
///
/// use std::fs::File;
/// use std::io::BufReader;
///
/// let file = File::open("input.txt")?;
/// let mut file = BufReader::new(file);
///
/// adler32(&mut file)?;
/// # Ok(()) }
/// # fn main() { run().unwrap() }
/// ```
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn adler32<R: BufRead>(mut reader: R) -> io::Result<u32> {
let mut h = Adler32::new();
loop {
let len = {
let buf = reader.fill_buf()?;
if buf.is_empty() {
return Ok(h.checksum());
}
h.write_slice(buf);
buf.len()
};
reader.consume(len);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn zeroes() {
assert_eq!(adler32_slice(&[]), 1);
assert_eq!(adler32_slice(&[0]), 1 | 1 << 16);
assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16);
assert_eq!(adler32_slice(&[0; 100]), 0x00640001);
assert_eq!(adler32_slice(&[0; 1024]), 0x04000001);
assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001);
}
#[test]
fn ones() {
assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e);
assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11);
}
#[test]
fn mixed() {
assert_eq!(adler32_slice(&[1]), 2 | 2 << 16);
assert_eq!(adler32_slice(&[40]), 41 | 41 << 16);
assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398);
}
#[test]
fn resume() {
let mut adler = Adler32::new();
adler.write_slice(&[0xff; 1024]);
let partial = adler.checksum();
assert_eq!(partial, 0x79a6fc2e); // from above
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
// Make sure that we can resume computing from the partial checksum via `from_checksum`.
let mut adler = Adler32::from_checksum(partial);
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
}
#[cfg(feature = "std")]
#[test]
fn bufread() {
use std::io::BufReader;
fn test(data: &[u8], checksum: u32) {
// `BufReader` uses an 8 KB buffer, so this will test buffer refilling.
let mut buf = BufReader::new(data);
let real_sum = adler32(&mut buf).unwrap();
assert_eq!(checksum, real_sum);
}
test(&[], 1);
test(&[0; 1024], 0x04000001);
test(&[0; 1024 * 1024], 0x00f00001);
test(&[0xA5; 1024 * 1024], 0xd5009ab1);
}
}
@ -1 +1 @@
{"files":{"CHANGELOG.md":"b4d01c4b8a790e435dc0ab67a1ef8b6d8e39f87bec233540e247ef313737d855","COPYING":"aacc8f585552509941b8531442e43a8e3e1aabc7d92f1ff0736250b80f65361c","Cargo.toml":"2970819aad5ebe0a89647be22da6613623199255ff0d7dbb9238927ffea696f1","README.md":"85cecaf786f948c26510911416d7e0ab4c4f10367d963cad011589648084a986","license/APACHE":"65071d88cda37097d5579c272cf0db48b23acc4e2fe3ad16a5985cd714753cbc","license/MIT":"74d0d1e38a980edecb7c71d33f2056456e2cb6c37c16bd05a882d714b5e56661","src/lib.rs":"fc1294b60cbf4d9ca3f61a43b86aac9533cd6b5b87729a9bd32f7992186d1a49","src/nightly.rs":"fc84f98e2014bef66bd54671d8ec98db973fb46b80fb271d6783eb00d1f95228","src/stable/alloc/global.rs":"411208558701915ff0f7cf7ef6c64b8a3bc932944416c26fd832d03d10a76502","src/stable/alloc/mod.rs":"63db909472169a70ad5332f33f67b88e9ea361c13725c65540d7003c83d8d226","src/stable/alloc/system.rs":"7c9145f594869c3cb934e97d3eda1b0b8ed6bd8ba89b1aea7435fc6680465b6b","src/stable/boxed.rs":"2ac1c0ba02149192030460c2be123cf0614c4fb60ce6bc483cc2b62cce83c22a","src/stable/macros.rs":"ce3915ce7ee003d8790c695d70a4e77b1e63a908a5ae0825169270d0f4ab5941","src/stable/mod.rs":"fc44985d0d999e2bd52693a49bb1796451c0a9a2e6d4f7565629392a38ca54e1","src/stable/raw_vec.rs":"bc1cb45b661ae5786912d625351e6e0d33aac8e4edaf36873874184a136cd89c","src/stable/slice.rs":"14d6eb35e3557b5f78feb48fd4bea343f037e8f1f2d2707089db4dbed438b558","src/stable/vec/drain.rs":"f8209cbd76a57823f6583a84fee285727b6c00189ec299acc9f97a0829f0742f","src/stable/vec/into_iter.rs":"9b0e58c8cd6c34b3c706696cb9508c977cbfaa0eeb32d13f799a82520b5cd490","src/stable/vec/mod.rs":"19a6772a4e3053c55c83dd774d3b0154852080bb58734dd49052a4175f0b4df1","src/stable/vec/partial_eq.rs":"cb88615747b4413f26dcab206e026bbd50150bf7d97d8df174384e86151d875e","src/stable/vec/set_len_on_drop.rs":"36f2e8fdc9b0a838eb443d74bec0291d389e52bfe4f617e391d977f15e6893b5","src/stable/vec/splice.rs":"7ce9fa74764c36ab9043f7339548e96b0b68f7d1a16769c9cb066b9a538dcb14"},"package":"0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"}
{"files":{"CHANGELOG.md":"886f8c688db0c22d24b650df0dc30a39d05d54d0e562c00d9574bf31cbf73251","Cargo.toml":"8c0d2ca1b5a6063aedcd462337eeb4dfb755d81e8d132fe810419689e45eef42","LICENSE-APACHE":"20fe7b00e904ed690e3b9fd6073784d3fc428141dbd10b81c01fd143d0797f58","LICENSE-MIT":"36516aefdc84c5d5a1e7485425913a22dbda69eb1930c5e84d6ae4972b5194b9","README.md":"7bf09e77c1d8e9292992b717d88e33d031439aa31dc9e7bb617464270519b051","src/lib.rs":"c937309febe24f97bc637650137311d5b8097b8574b0e973f4d6fb591c3448f7","src/nightly.rs":"fcff4d236e23bc95b1ce2c00140807ba3698cc01233d910d65d74986bb36f161","src/stable/alloc/global.rs":"14836ad7d73a364474fc153b24a1f17ad0e60a69b90a8721dc1059eada8bf869","src/stable/alloc/mod.rs":"866dafd3984dd246e381d8ad1c2b3e02a60c3421b598ca493aa83f9b6422608d","src/stable/alloc/system.rs":"db5d5bf088eecac3fc5ff1281e1bf26ca36dd38f13cd52c49d95ff1bab064254","src/stable/boxed.rs":"8b9b7f4cebbc1629c478dce0dd8227db16508e1383f24490d32eab7aeb3a0cea","src/stable/macros.rs":"74490796a766338d0163f40a37612cd9ea2de58ae3d8e9abf6c7bcf81d9be4a6","src/stable/mod.rs":"a6a724e10e4db4e3b7960c65bac803152a1115af46b898ff8a61e486365c16c7","src/stable/raw_vec.rs":"8cc0e3e4d5fd21e0e83776ff21c576cbb87b69647903ee9b8f5372f8781a7328","src/stable/slice.rs":"089263b058e6c185467bad7ad14908479e5675408fc70a8291e5dddaef36035a","src/stable/vec/drain.rs":"740cd2e0f31eeb0146bbd0f645a14fe12bacd3912f003db433ddc6b3a178461f","src/stable/vec/into_iter.rs":"88c22b09682cd90c7362d702d0501566173b2d836cf82a2b92ae11fdef5b9435","src/stable/vec/mod.rs":"1561b75d0bbcdf64f47bd7f1661088b68796f0e7e02a4e9391d8a50010b86f6b","src/stable/vec/partial_eq.rs":"9f1b18605164a62b58d9e17914d573698735de31c51ceb8bd3666e83d32df370","src/stable/vec/set_len_on_drop.rs":"561342e22a194e515cc25c9a1bcd827ca24c4db033e9e2c4266fbdd2fb16e5bc","src/stable/vec/splice.rs":"95a460b3a7b4af60fdc9ba04d3a719b61a0c11786cd2d8823d022e22c397f9c9"},"package":"5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"}
@ -1,6 +0,0 @@
Copyright 2023 The allocator-api2 Project Developers
Licensed under the Apache License, Version 2.0, <license/LICENSE-APACHE or
http://apache.org/licenses/LICENSE-2.0> or the MIT license <license/LICENSE-MIT or
http://opensource.org/licenses/MIT>, at your option. This file may not be
copied, modified, or distributed except according to those terms.
@ -12,7 +12,7 @@
[package]
edition = "2018"
name = "allocator-api2"
version = "0.2.16"
version = "0.2.18"
authors = ["Zakarum <zaq.dev@icloud.com>"]
description = "Mirror of Rust's allocator API"
homepage = "https://github.com/zakarumych/allocator-api2"
@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
@ -40,8 +40,8 @@ when some other crate enables "allocator-api2/nightly" feature.
Licensed under either of
* Apache License, Version 2.0, ([license/APACHE](license/APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([license/MIT](license/MIT) or http://opensource.org/licenses/MIT)
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
@ -1,13 +0,0 @@
Copyright 2023 The allocator-api2 project developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -1,25 +0,0 @@
Copyright (c) 2023 The allocator-api2 project developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
@ -1,7 +1,6 @@
use core::ptr::NonNull;
#[doc(inline)]
pub use alloc_crate::alloc::{alloc, alloc_zeroed, dealloc, handle_alloc_error, realloc};
use alloc_crate::alloc::{alloc, alloc_zeroed, dealloc, realloc};
use crate::stable::{assume, invalid_mut};
@ -55,7 +55,7 @@
#[macro_export]
macro_rules! vec {
(in $alloc:expr $(;)?) => (
$crate::vec::Vec::new()
$crate::vec::Vec::new_in($alloc)
);
(in $alloc:expr; $elem:expr; $n:expr) => (
$crate::vec::from_elem_in($elem, $n, $alloc)
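The hunk above fixes the empty `in <allocator>` arm of the `vec!` macro so that it forwards the given allocator to `Vec::new_in` instead of expanding to `Vec::new()`. A minimal usage sketch, assuming the crate's root-level `vec!` export and the `allocator_api2::alloc::Global` / `allocator_api2::vec::Vec` paths:
```rust
use allocator_api2::alloc::Global;
use allocator_api2::vec::Vec;

fn main() {
    // With the fixed macro arm, the empty form constructs the vector in the
    // supplied allocator (here the default `Global`) via `Vec::new_in`.
    let v: Vec<u8, Global> = allocator_api2::vec!(in Global);
    assert!(v.is_empty());
}
```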
@ -67,6 +67,9 @@ use core::ops::{self, Bound, Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice::{self, SliceIndex};
#[cfg(feature = "std")]
use std::io;
use super::{
alloc::{Allocator, Global},
assume,
@ -3115,6 +3118,38 @@ pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
v
}
/// Write is implemented for `Vec<u8>` by appending to the vector.
/// The vector will grow as needed.
#[cfg(feature = "std")]
impl<A: Allocator> io::Write for Vec<u8, A> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.extend_from_slice(buf);
Ok(buf.len())
}
#[inline]
fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
let len = bufs.iter().map(|b| b.len()).sum();
self.reserve(len);
for buf in bufs {
self.extend_from_slice(buf);
}
Ok(len)
}
#[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.extend_from_slice(buf);
Ok(())
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
#[cfg(feature = "serde")]
impl<T, A> serde::Serialize for Vec<T, A>
where
@ -1 +1 @@
{"files":{"Cargo.toml":"94ef33fb656ff0f7acc987633f36a283d36cec6662ba794051c11a73227d2853","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"0999fd320cdb3300dd22ff03b97cd41945cc9833a02a5b7c53ed36ab6d6f66e7","build.rs":"06f0353f62c32eac9942570b9163834e7ddcdc760d4bd4e7c2d98257bb0b647e","build/probe.rs":"b8b792036f13c9c1fbc6b1244198ea2305e61ddfcda3856563b581dcb1e1fe6e","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/backtrace.rs":"f3bcaddc8665442611ae0f31ce7e9c19ede55d4a3022ffeb16f4c982bca7f018","src/chain.rs":"e83ee788dc5bb6bebba64356b35abbd5b624f2449d47e3716d02b2473fb8e5c5","src/context.rs":"04e4b80b9f6d8163edc53455b98ab0c40cb9ad104bcf0c74f8075f22024471ab","src/ensure.rs":"d4c2588608e529275bfee1c1afc8860d7def868ab01e95a692a995ee06b2936e","src/error.rs":"a274234af662770340b237c9016beea2f94e7906f3fe69d0d78057929889f25b","src/fmt.rs":"adf4be906b29900153bfb4b767a6049d58697dc3bcce7dfbb85ca773f5de5b33","src/kind.rs":"febaac378b8bfca7b95a76c9bf9fbd8ccdfd0cc1a6277cdc109a2cda5763f16e","src/lib.rs":"cd9e127c9762602cc86d81f7b4946248a108ab5e3dcd8bfd1c06fbffcadd6f69","src/macros.rs":"dd35f2ec2a0a25e4504fb04bcd42f6d0963bc0035aaaefc412f5ee1d78945fe1","src/ptr.rs":"4cb31d2f815b178daf951bfb94a1930383e056c0ca68d494603f45d8eea35d50","src/wrapper.rs":"4ffdf284d45fee90f11a48e59c493ed1114e1243903ceb265fa89cd4c8c0d338","tests/common/mod.rs":"f9088c2d7afafa64ff730b629272045b776bfafc2f5957508242da630635f2e1","tests/compiletest.rs":"022a8e400ef813d7ea1875b944549cee5125f6a995dc33e93b48cba3e1b57bd1","tests/drop/mod.rs":"08c3e553c1cc0d2dbd936fc45f4b5b1105057186affd6865e8d261e05f0f0646","tests/test_autotrait.rs":"ecccf9202a33611f64b76598806aa82abec2560ae058e32f63fb2fb3ef225be9","tests/test_backtrace.rs":"ed144f90bf62cc441de067f6cee09ece94bca9da8f9b492d11d3dc652ba83e26","tests/test_boxed.rs":"6b26db0e2eb72afe9af7352ea820837aab90f8d486294616dd5dc34c1b94038c","tests/test_chain.rs":"3a8a8d7569913bd98c0e27c69d0bda35101e7fde7c056ed57cdd8ed018e4cbcb","tests/test_context.rs":"8409c53b328562c11e822bd6c3cd17e0d4d50b9bbb8fc3617333fd77303a6a33","tests/test_convert.rs":"7e7a8b4772a427a911014ac4d1083f9519000e786177f898808980dd9bdfde61","tests/test_downcast.rs":"797e69a72d125758c4c4897e5dc776d549d52cc9a6a633e0a33193f588a62b88","tests/test_ensure.rs":"89bb15f2a6288037bcf6ad976705038d9bea714da4244dee8314c720f88393c8","tests/test_ffi.rs":"d0cb4c1d6d9154090982dee72ae3ebe05a5981f976058c3250f1c9da5a45edef","tests/test_fmt.rs":"81b14dd207ba5fbf02aaed031646810906c9c9c2fc5cabffc8e88f82462be499","tests/test_macros.rs":"5172f82a6ad25b75f668f6419d161de3d12b8666d54701739fe33e86e4ddf148","tests/test_repr.rs":"034dee888abd08741e11ac2e95ef4fcb2ab3943d0a76e8e976db404658e1a252","tests/test_source.rs":"b80723cf635a4f8c4df21891b34bfab9ed2b2aa407e7a2f826d24e334cd5f88e","tests/ui/chained-comparison.rs":"6504b03d95b5acc232a7f4defc9f343b2be6733bf475fa0992e8e6545b912bd4","tests/ui/chained-comparison.stderr":"7f1d0a8c251b0ede2d30b3087ec157fc660945c97a642c4a5acf5a14ec58de34","tests/ui/empty-ensure.rs":"ab5bf37c846a0d689f26ce9257a27228411ed64154f9c950f1602d88a355d94b","tests/ui/empty-ensure.stderr":"315782f5f4246290fe190e3767b22c3dcaffaabc19c5ace0373537d53e765278","tests/ui/must-use.rs":"fb59860b43f673bf4a430a6036ba463e95028844d8dd4243cfe5ebc7f2be582f","tests/ui/must-use.stderr":"c2848c5f254b4c061eea6714d9baf709924aba06619eaf2a8b3aee1266b75f9e","tests/ui/no-impl.rs":"fab6cbf2f6ea510b86f567dfb3
b7c31250a9fd71ae5d110dbb9188be569ec593","tests/ui/no-impl.stderr":"6f1fd1c620a1319e98b261e4a42b9346c28154310c5bb3dc6d6ded72fbdf638d","tests/ui/temporary-value.rs":"4dcc96271b2403e6372cf4cfc813445e5ce4365fc6e156b6bc38274098499a70","tests/ui/temporary-value.stderr":"171f6c1c962503855480696e5d39e68946ec2a027b61a6f36ca1ad1b40265c5d","tests/ui/wrong-interpolation.rs":"9c44d4674c2dccd27b9dedd03341346ec02d993b41793ee89b5755202e7e367e","tests/ui/wrong-interpolation.stderr":"301e60e2eb9401782c7dc0b3580613a4cb2aafd4cc8065734a630a62e1161aa5"},"package":"0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247"}
{"files":{"Cargo.toml":"aa2d7538e2b3a969c60be518b72b524952d63b039811e8ab3791f1fbeb2645f5","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"6e608b9068b4ebab4b85944f047ea386f0288a99c615ac4a32ac45a7da4e5e33","build.rs":"22179bef46f3656c2436c97c2ac9751b26bbd04b67686ea6d75652aa92551fa7","build/probe.rs":"b8b792036f13c9c1fbc6b1244198ea2305e61ddfcda3856563b581dcb1e1fe6e","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/backtrace.rs":"ea40abc828b93d01272c2d9f4254502ed34934e8c86fc2eac883dd972a09d8b2","src/chain.rs":"e83ee788dc5bb6bebba64356b35abbd5b624f2449d47e3716d02b2473fb8e5c5","src/context.rs":"04e4b80b9f6d8163edc53455b98ab0c40cb9ad104bcf0c74f8075f22024471ab","src/ensure.rs":"9763f418b5397764549866c111ec6db3a7bdc4c30ad95c3bbfc56c5434ea8c09","src/error.rs":"a274234af662770340b237c9016beea2f94e7906f3fe69d0d78057929889f25b","src/fmt.rs":"adf4be906b29900153bfb4b767a6049d58697dc3bcce7dfbb85ca773f5de5b33","src/kind.rs":"febaac378b8bfca7b95a76c9bf9fbd8ccdfd0cc1a6277cdc109a2cda5763f16e","src/lib.rs":"1c774243700f38ccaced1609c9e37a25c01f5e8aa900b876aef206207d6e4846","src/macros.rs":"17fb103e68a5befa768857314c3ca4d81d1dacfea0738bbb9c7597a485243499","src/ptr.rs":"4cb31d2f815b178daf951bfb94a1930383e056c0ca68d494603f45d8eea35d50","src/wrapper.rs":"4ffdf284d45fee90f11a48e59c493ed1114e1243903ceb265fa89cd4c8c0d338","tests/common/mod.rs":"f9088c2d7afafa64ff730b629272045b776bfafc2f5957508242da630635f2e1","tests/compiletest.rs":"022a8e400ef813d7ea1875b944549cee5125f6a995dc33e93b48cba3e1b57bd1","tests/drop/mod.rs":"08c3e553c1cc0d2dbd936fc45f4b5b1105057186affd6865e8d261e05f0f0646","tests/test_autotrait.rs":"ecccf9202a33611f64b76598806aa82abec2560ae058e32f63fb2fb3ef225be9","tests/test_backtrace.rs":"ed144f90bf62cc441de067f6cee09ece94bca9da8f9b492d11d3dc652ba83e26","tests/test_boxed.rs":"6b26db0e2eb72afe9af7352ea820837aab90f8d486294616dd5dc34c1b94038c","tests/test_chain.rs":"3a8a8d7569913bd98c0e27c69d0bda35101e7fde7c056ed57cdd8ed018e4cbcb","tests/test_context.rs":"8409c53b328562c11e822bd6c3cd17e0d4d50b9bbb8fc3617333fd77303a6a33","tests/test_convert.rs":"7e7a8b4772a427a911014ac4d1083f9519000e786177f898808980dd9bdfde61","tests/test_downcast.rs":"797e69a72d125758c4c4897e5dc776d549d52cc9a6a633e0a33193f588a62b88","tests/test_ensure.rs":"a9a21c72ebcafd76e159b0297fb51fe75b7323f65b46a1f8a9e015c9515abdb0","tests/test_ffi.rs":"d0cb4c1d6d9154090982dee72ae3ebe05a5981f976058c3250f1c9da5a45edef","tests/test_fmt.rs":"81b14dd207ba5fbf02aaed031646810906c9c9c2fc5cabffc8e88f82462be499","tests/test_macros.rs":"68673942662a43bceee62aaed69c25d7ddbc55e25d62d528e13033c3e2e756cd","tests/test_repr.rs":"034dee888abd08741e11ac2e95ef4fcb2ab3943d0a76e8e976db404658e1a252","tests/test_source.rs":"b80723cf635a4f8c4df21891b34bfab9ed2b2aa407e7a2f826d24e334cd5f88e","tests/ui/chained-comparison.rs":"6504b03d95b5acc232a7f4defc9f343b2be6733bf475fa0992e8e6545b912bd4","tests/ui/chained-comparison.stderr":"7f1d0a8c251b0ede2d30b3087ec157fc660945c97a642c4a5acf5a14ec58de34","tests/ui/empty-ensure.rs":"ab5bf37c846a0d689f26ce9257a27228411ed64154f9c950f1602d88a355d94b","tests/ui/empty-ensure.stderr":"315782f5f4246290fe190e3767b22c3dcaffaabc19c5ace0373537d53e765278","tests/ui/ensure-nonbool.rs":"7e57cb93fbcd82959b36586ed6bd2ad978b051fe5facd5274651fde6b1600905","tests/ui/ensure-nonbool.stderr":"0b4d1611e3bb65081bf38c1e49b1f12e5096738f276608661016e68f1fe13f7c","tests/ui/must-use.rs":"fb59860b43f67
3bf4a430a6036ba463e95028844d8dd4243cfe5ebc7f2be582f","tests/ui/must-use.stderr":"c2848c5f254b4c061eea6714d9baf709924aba06619eaf2a8b3aee1266b75f9e","tests/ui/no-impl.rs":"fab6cbf2f6ea510b86f567dfb3b7c31250a9fd71ae5d110dbb9188be569ec593","tests/ui/no-impl.stderr":"0d8ed712d25de898eae18cfdffc575a47f4d5596346058cf6cd50d016c4f8ce8","tests/ui/temporary-value.rs":"4dcc96271b2403e6372cf4cfc813445e5ce4365fc6e156b6bc38274098499a70","tests/ui/temporary-value.stderr":"171f6c1c962503855480696e5d39e68946ec2a027b61a6f36ca1ad1b40265c5d","tests/ui/wrong-interpolation.rs":"9c44d4674c2dccd27b9dedd03341346ec02d993b41793ee89b5755202e7e367e","tests/ui/wrong-interpolation.stderr":"301e60e2eb9401782c7dc0b3580613a4cb2aafd4cc8065734a630a62e1161aa5"},"package":"b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"}
@ -13,8 +13,13 @@
edition = "2018"
rust-version = "1.39"
name = "anyhow"
version = "1.0.81"
version = "1.0.86"
authors = ["David Tolnay <dtolnay@gmail.com>"]
build = "build.rs"
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Flexible concrete Error type built on std::error::Error"
documentation = "https://docs.rs/anyhow"
readme = "README.md"
@ -38,8 +43,66 @@ rustdoc-args = [
targets = ["x86_64-unknown-linux-gnu"]
[lib]
name = "anyhow"
path = "src/lib.rs"
doc-scrape-examples = false
[[test]]
name = "test_ensure"
path = "tests/test_ensure.rs"
[[test]]
name = "test_chain"
path = "tests/test_chain.rs"
[[test]]
name = "test_fmt"
path = "tests/test_fmt.rs"
[[test]]
name = "test_source"
path = "tests/test_source.rs"
[[test]]
name = "test_repr"
path = "tests/test_repr.rs"
[[test]]
name = "test_autotrait"
path = "tests/test_autotrait.rs"
[[test]]
name = "test_boxed"
path = "tests/test_boxed.rs"
[[test]]
name = "test_backtrace"
path = "tests/test_backtrace.rs"
[[test]]
name = "test_convert"
path = "tests/test_convert.rs"
[[test]]
name = "test_macros"
path = "tests/test_macros.rs"
[[test]]
name = "compiletest"
path = "tests/compiletest.rs"
[[test]]
name = "test_context"
path = "tests/test_context.rs"
[[test]]
name = "test_ffi"
path = "tests/test_ffi.rs"
[[test]]
name = "test_downcast"
path = "tests/test_downcast.rs"
[dependencies.backtrace]
version = "0.3.51"
optional = true
@ -26,7 +26,7 @@ anyhow = "1.0"
return type of any fallible function.
Within the function, use `?` to easily propagate any error that implements the
`std::error::Error` trait.
[`std::error::Error`] trait.
```rust
use anyhow::Result;
@ -38,6 +38,8 @@ anyhow = "1.0"
}
```
[`std::error::Error`]: https://doc.rust-lang.org/std/error/trait.Error.html
- Attach context to help the person troubleshooting the error understand where
things went wrong. A low-level error like "No such file or directory" can be
annoying to debug without more context about what higher level step the
@ -125,8 +127,8 @@ anyhow = "1.0"
## No-std support
In no_std mode, the same API is almost all available and works the same way. To
depend on Anyhow in no_std mode, disable our default enabled "std" feature in
In no_std mode, almost all of the same API is available and works the same way.
To depend on Anyhow in no_std mode, disable our default enabled "std" feature in
Cargo.toml. A global allocator is required.
```toml
@ -1,5 +1,6 @@
use std::env;
use std::ffi::OsString;
use std::iter;
use std::path::Path;
use std::process::{self, Command, Stdio};
use std::str;
@ -64,6 +65,16 @@ fn main() {
None => return,
};
if rustc >= 80 {
println!("cargo:rustc-check-cfg=cfg(anyhow_nightly_testing)");
println!("cargo:rustc-check-cfg=cfg(anyhow_no_fmt_arguments_as_str)");
println!("cargo:rustc-check-cfg=cfg(anyhow_no_ptr_addr_of)");
println!("cargo:rustc-check-cfg=cfg(anyhow_no_unsafe_op_in_unsafe_fn_lint)");
println!("cargo:rustc-check-cfg=cfg(doc_cfg)");
println!("cargo:rustc-check-cfg=cfg(error_generic_member_access)");
println!("cargo:rustc-check-cfg=cfg(std_backtrace)");
}
if rustc < 51 {
// core::ptr::addr_of
// https://blog.rust-lang.org/2021/03/25/Rust-1.51.0.html#stabilized-apis
@ -103,15 +114,15 @@ fn compile_probe(rustc_bootstrap: bool) -> bool {
let out_dir = cargo_env_var("OUT_DIR");
let probefile = Path::new("build").join("probe.rs");
// Make sure to pick up Cargo rustc configuration.
let mut cmd = if let Some(wrapper) = env::var_os("RUSTC_WRAPPER") {
let mut cmd = Command::new(wrapper);
// The wrapper's first argument is supposed to be the path to rustc.
cmd.arg(rustc);
cmd
} else {
Command::new(rustc)
};
let rustc_wrapper = env::var_os("RUSTC_WRAPPER").filter(|wrapper| !wrapper.is_empty());
let rustc_workspace_wrapper =
env::var_os("RUSTC_WORKSPACE_WRAPPER").filter(|wrapper| !wrapper.is_empty());
let mut rustc = rustc_wrapper
.into_iter()
.chain(rustc_workspace_wrapper)
.chain(iter::once(rustc));
let mut cmd = Command::new(rustc.next().unwrap());
cmd.args(rustc);
if !rustc_bootstrap {
cmd.env_remove("RUSTC_BOOTSTRAP");
@ -65,7 +65,8 @@ macro_rules! backtrace_if_absent {
#[cfg(all(not(std_backtrace), feature = "backtrace"))]
mod capture {
use alloc::borrow::Cow;
use alloc::borrow::{Cow, ToOwned as _};
use alloc::vec::Vec;
use backtrace::{BacktraceFmt, BytesOrWideString, Frame, PrintFmt, SymbolName};
use core::cell::UnsafeCell;
use core::fmt::{self, Debug, Display};
@ -147,8 +147,8 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(pat $stack $bail ($($fuel)*) {($($buf)* $let) $($parse)*} ($($rest)*) $($rest)*)
};
(0 $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($life:tt $colon:tt $($dup:tt)*) $label:lifetime : $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $life $colon) $($parse)*} ($($rest)*) $($rest)*)
(0 $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($lifetime:tt $colon:tt $($dup:tt)*) $label:lifetime : $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $lifetime $colon) $($parse)*} ($($rest)*) $($rest)*)
};
(0 $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $mut:tt $($dup:tt)*) &mut $($rest:tt)*) => {
@ -236,17 +236,17 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(atom $stack $bail ($($fuel)*) {($($buf)* $const $block) $($parse)*} ($($rest)*) $($rest)*)
};
(0 $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(atom $stack $bail ($($fuel)*) {($($buf)* $lit) $($parse)*} ($($rest)*) $($rest)*)
(0 $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($literal:tt $($dup:tt)*) $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(atom $stack $bail ($($fuel)*) {($($buf)* $literal) $($parse)*} ($($rest)*) $($rest)*)
};
// path expressions
(0 $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $($dup:tt)*) :: $ident:ident $($rest:tt)*) => {
(0 $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $ident:tt $($dup:tt)*) :: $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(epath (atom $stack) $bail ($($fuel)*) {($($buf)* $colons $ident) $($parse)*} ($($rest)*) $($rest)*)
};
(0 $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt $ident:ident $($rest:tt)*) => {
(0 $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($ident:tt $($dup:tt)*) $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(epath (atom $stack) $bail ($($fuel)*) {($($buf)* $ident) $($parse)*} ($($rest)*) $($rest)*)
};
@ -258,15 +258,19 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(generic (epath $stack) $bail ($($fuel)*) {($($buf)* $colons $langle) $($parse)*} ($($rest)*) $($rest)*)
};
(epath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $($dup:tt)*) :: << $($rest:tt)*) => {
$crate::__parse_ensure!(generic (epath $stack) $bail ($($fuel)*) {($($buf)* $colons <) $($parse)*} (< $($rest)*) < $($rest)*)
(epath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $langle:tt $($dup:tt)*) :: << $($rest:tt)*) => {
$crate::__parse_ensure!(type (qpath (tpath (arglist (epath $stack)))) $bail ($($fuel)*) {($($buf)* $colons $langle) $($parse)*} ($($rest)*) $($rest)*)
};
(epath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $($dup:tt)*) :: <- $($rest:tt)*) => {
$crate::__parse_ensure!(generic (epath $stack) $bail ($($fuel)*) {($($buf)* $colons <) $($parse)*} (- $($rest)*) - $($rest)*)
(epath $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt :: <- - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(epath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $($dup:tt)*) :: $ident:ident $($rest:tt)*) => {
(epath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $larrow:tt $($dup:tt)*) :: <- $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(generic (epath $stack) $bail ($($fuel)*) {($($buf)* $colons $larrow) $($parse)*} ($($dup)*) $($dup)*)
};
(epath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $ident:tt $($dup:tt)*) :: $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(epath $stack $bail ($($fuel)*) {($($buf)* $colons $ident) $($parse)*} ($($rest)*) $($rest)*)
};
@ -282,6 +286,10 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) {($($buf)* $bang $args) $($parse)*} ($($rest)*) $($rest)*)
};
(epath (split ($pop:ident $stack:tt)) $bail:tt (~$($fuel:tt)*) $parse:tt $dup:tt $($rest:tt)*) => {
$crate::__parse_ensure!($pop (split $stack) $bail ($($fuel)*) $parse $dup $($rest)*)
};
(epath ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) $parse:tt $dup:tt $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) $parse $dup $($rest)*)
};
@ -308,18 +316,26 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(generic (atom $stack) $bail ($($fuel)*) {($($buf)* $dot $ident $colons $langle) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($dot:tt $ident:tt $colons:tt $($dup:tt)*) . $i:ident :: << $($rest:tt)*) => {
$crate::__parse_ensure!(generic (atom $stack) $bail ($($fuel)*) {($($buf)* $dot $ident $colons <) $($parse)*} (< $($rest)*) < $($rest)*)
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($dot:tt $ident:tt $colons:tt $langle:tt $($dup:tt)*) . $i:ident :: << $($rest:tt)*) => {
$crate::__parse_ensure!(type (qpath (tpath (arglist (atom $stack)))) $bail ($($fuel)*) {($($buf)* $dot $ident $colons $langle) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($dot:tt $ident:tt $colons:tt $($dup:tt)*) . $i:ident :: <- $($rest:tt)*) => {
$crate::__parse_ensure!(generic (atom $stack) $bail ($($fuel)*) {($($buf)* $dot $ident $colons <) $($parse)*} (- $($rest)*) - $($rest)*)
(atom $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt . $i:ident :: <- - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($dot:tt $($dup:tt)*) . $field:ident $($rest:tt)*) => {
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($dot:tt $ident:tt $colons:tt $larrow:tt $($dup:tt)*) . $i:ident :: <- $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(generic (atom $stack) $bail ($($fuel)*) {($($buf)* $dot $ident $colons $larrow) $($parse)*} ($($dup)*) $($dup)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($dot:tt $field:tt $($dup:tt)*) . $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(atom $stack $bail ($($fuel)*) {($($buf)* $dot $field) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt . - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($dot:tt $index:tt $($dup:tt)*) . $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(atom $stack $bail ($($fuel)*) {($($buf)* $dot $index) $($parse)*} ($($rest)*) $($rest)*)
};
@ -346,43 +362,51 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $star $mut) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $life:lifetime $mut:tt $($dup:tt)*) & $l:lifetime mut $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $life $mut) $($parse)*} ($($rest)*) $($rest)*)
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $lifetime:tt $mut:tt $($dup:tt)*) & $l:lifetime mut $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $lifetime $mut) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $mut:tt $($dup:tt)*) & mut $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $mut) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $life:lifetime $($dup:tt)*) & $l:lifetime $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $life) $($parse)*} ($($rest)*) $($rest)*)
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $lifetime:tt $($dup:tt)*) & $l:lifetime $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $lifetime) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $($dup:tt)*) & $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $life:lifetime $mut:tt $($dup:tt)*) && $l:lifetime mut $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $life $mut) $($parse)*} ($($rest)*) $($rest)*)
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $lifetime:tt $mut:tt $($dup:tt)*) && $l:lifetime mut $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $lifetime $mut) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $mut:tt $($dup:tt)*) && mut $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $mut) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $life:lifetime $($dup:tt)*) && $l:lifetime $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $life) $($parse)*} ($($rest)*) $($rest)*)
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $lifetime:tt $($dup:tt)*) && $l:lifetime $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and $lifetime) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $($dup:tt)*) && $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $and) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt unsafe extern - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($unsafe:tt $(extern $($abi:literal)?)? fn $($dup:tt)*) unsafe $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $unsafe) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($extern:tt $abi:literal fn $($dup:tt)*) extern $lit:literal $($rest:tt)*) => {
(type $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt extern - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($extern:tt $abi:tt fn $($dup:tt)*) extern $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(type $stack $bail ($($fuel)*) {($($buf)* $extern $abi) $($parse)*} ($($rest)*) $($rest)*)
};
@ -420,11 +444,11 @@ macro_rules! __parse_ensure {
// path types
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $($dup:tt)*) :: $ident:ident $($rest:tt)*) => {
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $ident:tt $($dup:tt)*) :: $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(tpath $stack $bail ($($fuel)*) {($($buf)* $colons $ident) $($parse)*} ($($rest)*) $($rest)*)
};
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt $ident:ident $($rest:tt)*) => {
(type $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($ident:tt $($dup:tt)*) $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(tpath $stack $bail ($($fuel)*) {($($buf)* $ident) $($parse)*} ($($rest)*) $($rest)*)
};
@ -436,27 +460,35 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(generic (tpath $stack) $bail ($($fuel)*) {($($buf)* $langle) $($parse)*} ($($rest)*) $($rest)*)
};
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt << $($rest:tt)*) => {
$crate::__parse_ensure!(generic (tpath $stack) $bail ($($fuel)*) {($($buf)* <) $($parse)*} (< $($rest)*) < $($rest)*)
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($langle:tt $($dup:tt)*) << $($rest:tt)*) => {
$crate::__parse_ensure!(type (qpath (tpath (arglist (tpath $stack)))) $bail ($($fuel)*) {($($buf)* $langle) $($parse)*} ($($rest)*) $($rest)*)
};
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt <- $($rest:tt)*) => {
$crate::__parse_ensure!(generic (tpath $stack) $bail ($($fuel)*) {($($buf)* <) $($parse)*} (- $($rest)*) - $($rest)*)
(tpath $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt <- - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($larrow:tt $($dup:tt)*) <- $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(generic (tpath $stack) $bail ($($fuel)*) {($($buf)* $larrow) $($parse)*} ($($dup)*) $($dup)*)
};
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $langle:tt $($dup:tt)*) :: < $($rest:tt)*) => {
$crate::__parse_ensure!(generic (tpath $stack) $bail ($($fuel)*) {($($buf)* $colons $langle) $($parse)*} ($($rest)*) $($rest)*)
};
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $($dup:tt)*) :: << $($rest:tt)*) => {
$crate::__parse_ensure!(generic (tpath $stack) $bail ($($fuel)*) {($($buf)* $colons <) $($parse)*} (< $($rest)*) < $($rest)*)
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $langle:tt $($dup:tt)*) :: << $($rest:tt)*) => {
$crate::__parse_ensure!(type (qpath (tpath (arglist (tpath $stack)))) $bail ($($fuel)*) {($($buf)* $colons $langle) $($parse)*} ($($rest)*) $($rest)*)
};
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $($dup:tt)*) :: <- $($rest:tt)*) => {
$crate::__parse_ensure!(generic (tpath $stack) $bail ($($fuel)*) {($($buf)* $colons <) $($parse)*} (- $($rest)*) - $($rest)*)
(tpath $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt :: <- - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $($dup:tt)*) :: $ident:ident $($rest:tt)*) => {
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $larrow:tt $($dup:tt)*) :: <- $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(generic (tpath $stack) $bail ($($fuel)*) {($($buf)* $colons $larrow) $($parse)*} ($($dup)*) $($dup)*)
};
(tpath $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $ident:tt $($dup:tt)*) :: $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(tpath $stack $bail ($($fuel)*) {($($buf)* $colons $ident) $($parse)*} ($($rest)*) $($rest)*)
};
@ -494,7 +526,11 @@ macro_rules! __parse_ensure {
// qualified paths
(qpath ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rangle:tt $colons:tt $($dup:tt)*) > :: $ident:ident $($rest:tt)*) => {
(qpath (split ($pop:ident $stack:tt)) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rangle:tt $colons:tt $ident:tt $($dup:tt)*) >> :: $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) {($($buf)* $rangle $colons $ident) $($parse)*} ($($rest)*) $($rest)*)
};
(qpath ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rangle:tt $colons:tt $ident:tt $($dup:tt)*) > :: $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) {($($buf)* $rangle $colons $ident) $($parse)*} ($($rest)*) $($rest)*)
};
@ -504,41 +540,61 @@ macro_rules! __parse_ensure {
// trait objects
(object (arglist $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($plus:tt $colons:tt $($dup:tt)*) + :: $ident:ident $($rest:tt)*) => {
(object (arglist $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($plus:tt $colons:tt $ident:tt $($dup:tt)*) + :: $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(tpath (arglist $stack) $bail ($($fuel)*) {($($buf)* $plus $colons $ident) $($parse)*} ($($rest)*) $($rest)*)
};
(object (arglist $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($plus:tt $($dup:tt)*) + $ident:ident $($rest:tt)*) => {
(object (arglist $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($plus:tt $ident:tt $($dup:tt)*) + $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(tpath (arglist $stack) $bail ($($fuel)*) {($($buf)* $plus $ident) $($parse)*} ($($rest)*) $($rest)*)
};
(object (split ($pop:ident $stack:tt)) $bail:tt (~$($fuel:tt)*) $parse:tt $dup:tt $($rest:tt)*) => {
$crate::__parse_ensure!($pop (split $stack) $bail ($($fuel)*) $parse $dup $($rest)*)
};
(object ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) $parse:tt $dup:tt $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) $parse $dup $($rest)*)
};
// angle bracketed generic arguments
(generic (split ($pop:ident $stack:tt)) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rangle:tt $($dup:tt)*) >> $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) {($($buf)* $rangle) $($parse)*} ($($rest)*) $($rest)*)
};
(generic ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rangle:tt $($dup:tt)*) > $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) {($($buf)* $rangle) $($parse)*} ($($rest)*) $($rest)*)
};
(generic ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt >> $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) {($($buf)* >) $($parse)*} (> $($rest)*) > $($rest)*)
(generic ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rangle:tt $($dup:tt)*) >> $($rest:tt)*) => {
$crate::__parse_ensure!($pop (split $stack) $bail ($($fuel)*) {($($buf)*) $($parse)*} ($rangle $($rest)*) $rangle $($rest)*)
};
(generic $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(arglist $stack $bail ($($fuel)*) {($($buf)* $lit) $($parse)*} ($($rest)*) $($rest)*)
(generic $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt - - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(generic $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($neg:tt $($dup:tt)*) - $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(generic $stack $bail ($($fuel)*) {($($buf)* $neg) $($parse)*} ($($dup)*) $($dup)*)
};
(generic $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(generic $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($literal:tt $($dup:tt)*) $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(arglist $stack $bail ($($fuel)*) {($($buf)* $literal) $($parse)*} ($($rest)*) $($rest)*)
};
(generic $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($brace:tt $($dup:tt)*) {$($block:tt)*} $($rest:tt)*) => {
$crate::__parse_ensure!(arglist $stack $bail ($($fuel)*) {($($buf)* $brace) $($parse)*} ($($rest)*) $($rest)*)
};
(generic $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt $life:lifetime $($rest:tt)*) => {
$crate::__parse_ensure!(arglist $stack $bail ($($fuel)*) {($($buf)* $life) $($parse)*} ($($rest)*) $($rest)*)
(generic $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($lifetime:tt $($dup:tt)*) $l:lifetime $($rest:tt)*) => {
$crate::__parse_ensure!(arglist $stack $bail ($($fuel)*) {($($buf)* $lifetime) $($parse)*} ($($rest)*) $($rest)*)
};
(generic $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($assoc:ident $eq:tt $($dup:tt)*) $ident:ident = $($rest:tt)*) => {
(generic $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($assoc:tt $eq:tt $($dup:tt)*) $ident:ident = $($rest:tt)*) => {
$crate::__parse_ensure!(type (arglist $stack) $bail ($($fuel)*) {($($buf)* $assoc $eq) $($parse)*} ($($rest)*) $($rest)*)
};
@ -550,12 +606,16 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(generic $stack $bail ($($fuel)*) {($($buf)* $comma) $($parse)*} ($($rest)*) $($rest)*)
};
(arglist (split ($pop:ident $stack:tt)) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rangle:tt $($dup:tt)*) >> $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) {($($buf)*) $rangle $($parse)*} ($($rest)*) $($rest)*)
};
(arglist ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rangle:tt $($dup:tt)*) > $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) {($($buf)* $rangle) $($parse)*} ($($rest)*) $($rest)*)
};
(arglist ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt >> $($rest:tt)*) => {
$crate::__parse_ensure!($pop $stack $bail ($($fuel)*) {($($buf)* >) $($parse)*} (> $($rest)*) > $($rest)*)
(arglist ($pop:ident $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rangle:tt $($dup:tt)*) >> $($rest:tt)*) => {
$crate::__parse_ensure!($pop (split $stack) $bail ($($fuel)*) {($($buf)*) $($parse)*} ($rangle $($rest)*) $rangle $($rest)*)
};
// patterns
@ -584,8 +644,20 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(pat $stack $bail ($($fuel)*) {($($buf)* $at) $($parse)*} ($($rest)*) $($rest)*)
};
(pat $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(pat $stack $bail ($($fuel)*) {($($buf)* $lit) $($parse)*} ($($rest)*) $($rest)*)
(pat $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt - - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(pat $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($neg:tt $($dup:tt)*) - $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(pat $stack $bail ($($fuel)*) {($($buf)* $neg) $($parse)*} ($($dup)*) $($dup)*)
};
(pat $stack:tt ($($bail:tt)*) (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt - $($rest:tt)*) => {
$crate::__fallback_ensure!($($bail)*)
};
(pat $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($literal:tt $($dup:tt)*) $lit:literal $($rest:tt)*) => {
$crate::__parse_ensure!(pat $stack $bail ($($fuel)*) {($($buf)* $literal) $($parse)*} ($($rest)*) $($rest)*)
};
(pat $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($range:tt $($dup:tt)*) .. $($rest:tt)*) => {
@ -620,11 +692,11 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(pat $stack $bail ($($fuel)*) {($($buf)* $wild) $($parse)*} ($($rest)*) $($rest)*)
};
(pat $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $($dup:tt)*) :: $ident:ident $($rest:tt)*) => {
(pat $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($colons:tt $ident:tt $($dup:tt)*) :: $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(epath (pat $stack) $bail ($($fuel)*) {($($buf)* $colons $ident) $($parse)*} ($($rest)*) $($rest)*)
};
(pat $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt $ident:ident $($rest:tt)*) => {
(pat $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($ident:tt $($dup:tt)*) $i:ident $($rest:tt)*) => {
$crate::__parse_ensure!(epath (pat $stack) $bail ($($fuel)*) {($($buf)* $ident) $($parse)*} ($($rest)*) $($rest)*)
};
@ -632,48 +704,6 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(type (qpath (epath (pat $stack))) $bail ($($fuel)*) {($($buf)* $langle) $($parse)*} ($($rest)*) $($rest)*)
};
// high precedence binary operators
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($add:tt $($dup:tt)*) + $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $add) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($sub:tt $($dup:tt)*) - $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $sub) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($mul:tt $($dup:tt)*) * $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $mul) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($div:tt $($dup:tt)*) / $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $div) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rem:tt $($dup:tt)*) % $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $rem) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($bitxor:tt $($dup:tt)*) ^ $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $bitxor) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($bitand:tt $($dup:tt)*) & $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $bitand) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($bitor:tt $($dup:tt)*) | $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $bitor) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($shl:tt $($dup:tt)*) << $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $shl) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($shr:tt $($dup:tt)*) >> $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $shr) $($parse)*} ($($rest)*) $($rest)*)
};
// comparison binary operators
(atom () $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($eq:tt $($dup:tt)*) == $($rest:tt)*) => {
@ -716,14 +746,64 @@ macro_rules! __parse_ensure {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $ge) $($parse)*} ($($rest)*) $($rest)*)
};
(atom (split ()) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} $dup:tt >> $($rest:tt)*) => {
$crate::__parse_ensure!(0 () $bail ($($fuel)*) {() $($parse)* ($($buf)* > ) > } ($($rest)*) $($rest)*)
};
(atom () $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($gt:tt $($dup:tt)*) > $($rest:tt)*) => {
$crate::__parse_ensure!(0 () $bail ($($fuel)*) {() $($parse)* ($($buf)*) $gt} ($($rest)*) $($rest)*)
};
(atom (split $stack:tt) $bail:tt (~$($fuel:tt)*) {($($buf:tt)+) $($parse:tt)*} ($rangle:tt $($dup:tt)*) >> $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $rangle) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)+) $($parse:tt)*} ($gt:tt $($dup:tt)*) > $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $gt) $($parse)*} ($($rest)*) $($rest)*)
};
// high precedence binary operators
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($add:tt $($dup:tt)*) + $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $add) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($sub:tt $($dup:tt)*) - $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $sub) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($mul:tt $($dup:tt)*) * $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $mul) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($div:tt $($dup:tt)*) / $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $div) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($rem:tt $($dup:tt)*) % $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $rem) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($bitxor:tt $($dup:tt)*) ^ $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $bitxor) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($bitand:tt $($dup:tt)*) & $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $bitand) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($bitor:tt $($dup:tt)*) | $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $bitor) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($shl:tt $($dup:tt)*) << $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $shl) $($parse)*} ($($rest)*) $($rest)*)
};
(atom $stack:tt $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($shr:tt $($dup:tt)*) >> $($rest:tt)*) => {
$crate::__parse_ensure!(0 $stack $bail ($($fuel)*) {($($buf)* $shr) $($parse)*} ($($rest)*) $($rest)*)
};
// low precedence binary operators
(atom ($($stack:tt)+) $bail:tt (~$($fuel:tt)*) {($($buf:tt)*) $($parse:tt)*} ($and:tt $($dup:tt)*) && $($rest:tt)*) => {
@ -815,24 +895,24 @@ macro_rules! __fancy_ensure {
#[macro_export]
macro_rules! __fallback_ensure {
($cond:expr $(,)?) => {
if !$cond {
if $crate::__private::not($cond) {
return $crate::__private::Err($crate::Error::msg(
$crate::__private::concat!("Condition failed: `", $crate::__private::stringify!($cond), "`")
));
}
};
($cond:expr, $msg:literal $(,)?) => {
if !$cond {
if $crate::__private::not($cond) {
return $crate::__private::Err($crate::__anyhow!($msg));
}
};
($cond:expr, $err:expr $(,)?) => {
if !$cond {
if $crate::__private::not($cond) {
return $crate::__private::Err($crate::__anyhow!($err));
}
};
($cond:expr, $fmt:expr, $($arg:tt)*) => {
if !$cond {
if $crate::__private::not($cond) {
return $crate::__private::Err($crate::__anyhow!($fmt, $($arg)*));
}
};

View File

@ -17,7 +17,7 @@
//! the return type of any fallible function.
//!
//! Within the function, use `?` to easily propagate any error that implements
//! the `std::error::Error` trait.
//! the [`std::error::Error`] trait.
//!
//! ```
//! # pub trait Deserialize {}
@ -192,8 +192,8 @@
//!
//! # No-std support
//!
//! In no_std mode, the same API is almost all available and works the same way.
//! To depend on Anyhow in no_std mode, disable our default enabled "std"
//! In no_std mode, almost all of the same API is available and works the same
//! way. To depend on Anyhow in no_std mode, disable our default enabled "std"
//! feature in Cargo.toml. A global allocator is required.
//!
//! ```toml
@ -206,7 +206,7 @@
//! will require an explicit `.map_err(Error::msg)` when working with a
//! non-Anyhow error type inside a function that returns Anyhow's error type.
#![doc(html_root_url = "https://docs.rs/anyhow/1.0.81")]
#![doc(html_root_url = "https://docs.rs/anyhow/1.0.85")]
#![cfg_attr(error_generic_member_access, feature(error_generic_member_access))]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![no_std]
@ -383,7 +383,7 @@ pub use anyhow as format_err;
/// # Ok(())
/// }
/// ```
#[cfg_attr(not(doc), repr(transparent))]
#[repr(transparent)]
pub struct Error {
inner: Own<ErrorImpl>,
}
@ -651,6 +651,7 @@ pub fn Ok<T>(t: T) -> Result<T> {
// Not public API. Referenced by macro-generated code.
#[doc(hidden)]
pub mod __private {
use self::not::Bool;
use crate::Error;
use alloc::fmt;
use core::fmt::Arguments;
@ -699,4 +700,31 @@ pub mod __private {
pub fn must_use(error: Error) -> Error {
error
}
#[doc(hidden)]
#[inline]
pub fn not(cond: impl Bool) -> bool {
cond.not()
}
mod not {
#[doc(hidden)]
pub trait Bool {
fn not(self) -> bool;
}
impl Bool for bool {
#[inline]
fn not(self) -> bool {
!self
}
}
impl Bool for &bool {
#[inline]
fn not(self) -> bool {
!*self
}
}
}
}
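
For orientation: the `not` helper above is what lets the rewritten `ensure!` fallback arms test the condition without writing a literal `!`, so both `bool` and `&bool` (for example a binding produced by matching on a reference) are accepted, while any other type is rejected at compile time. A minimal standalone sketch mirroring the vendored trait; only `main` is added here for illustration:

// Mirrors the `__private::not` machinery shown above; the trait and impls are
// the same, the `main` function is illustrative only.
trait Bool {
    fn not(self) -> bool;
}

impl Bool for bool {
    fn not(self) -> bool {
        !self
    }
}

impl Bool for &bool {
    fn not(self) -> bool {
        !*self
    }
}

fn not(cond: impl Bool) -> bool {
    cond.not()
}

fn main() {
    let flag = true;
    // A plain `bool` behaves exactly as before...
    assert!(!not(flag));
    // ...and a `&bool`, e.g. a binding from matching on a reference, now works
    // too; this is what the new `test_ensure_nonbool` test below exercises.
    assert!(!not(&flag));
    // Anything else (`&str`, `&mut bool`, `Deref<Target = bool>` wrappers)
    // fails to compile, as the `ensure-nonbool` UI test further down shows.
}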

View File

@ -65,59 +65,66 @@ macro_rules! bail {
};
}
/// Return early with an error if a condition is not satisfied.
///
/// This macro is equivalent to `if !$cond { return
/// Err(`[`anyhow!($args...)`][anyhow!]`); }`.
///
/// The surrounding function's or closure's return value is required to be
/// `Result<_,`[`anyhow::Error`][crate::Error]`>`.
///
/// Analogously to `assert!`, `ensure!` takes a condition and exits the function
/// if the condition fails. Unlike `assert!`, `ensure!` returns an `Error`
/// rather than panicking.
///
/// [anyhow!]: crate::anyhow
///
/// # Example
///
/// ```
/// # use anyhow::{ensure, Result};
/// #
/// # fn main() -> Result<()> {
/// # let user = 0;
/// #
/// ensure!(user == 0, "only user 0 is allowed");
/// # Ok(())
/// # }
/// ```
///
/// ```
/// # use anyhow::{ensure, Result};
/// # use thiserror::Error;
/// #
/// # const MAX_DEPTH: usize = 1;
/// #
/// #[derive(Error, Debug)]
/// enum ScienceError {
/// #[error("recursion limit exceeded")]
/// RecursionLimitExceeded,
/// # #[error("...")]
/// # More = (stringify! {
/// ...
/// # }, 1).1,
/// }
///
/// # fn main() -> Result<()> {
/// # let depth = 0;
/// #
/// ensure!(depth <= MAX_DEPTH, ScienceError::RecursionLimitExceeded);
/// # Ok(())
/// # }
/// ```
macro_rules! __ensure {
($ensure:item) => {
/// Return early with an error if a condition is not satisfied.
///
/// This macro is equivalent to `if !$cond { return
/// Err(`[`anyhow!($args...)`][anyhow!]`); }`.
///
/// The surrounding function's or closure's return value is required to be
/// `Result<_,`[`anyhow::Error`][crate::Error]`>`.
///
/// Analogously to `assert!`, `ensure!` takes a condition and exits the function
/// if the condition fails. Unlike `assert!`, `ensure!` returns an `Error`
/// rather than panicking.
///
/// [anyhow!]: crate::anyhow
///
/// # Example
///
/// ```
/// # use anyhow::{ensure, Result};
/// #
/// # fn main() -> Result<()> {
/// # let user = 0;
/// #
/// ensure!(user == 0, "only user 0 is allowed");
/// # Ok(())
/// # }
/// ```
///
/// ```
/// # use anyhow::{ensure, Result};
/// # use thiserror::Error;
/// #
/// # const MAX_DEPTH: usize = 1;
/// #
/// #[derive(Error, Debug)]
/// enum ScienceError {
/// #[error("recursion limit exceeded")]
/// RecursionLimitExceeded,
/// # #[error("...")]
/// # More = (stringify! {
/// ...
/// # }, 1).1,
/// }
///
/// # fn main() -> Result<()> {
/// # let depth = 0;
/// #
/// ensure!(depth <= MAX_DEPTH, ScienceError::RecursionLimitExceeded);
/// # Ok(())
/// # }
/// ```
$ensure
};
}
#[cfg(doc)]
#[macro_export]
macro_rules! ensure {
__ensure![
#[macro_export]
macro_rules! ensure {
($cond:expr $(,)?) => {
if !$cond {
return $crate::__private::Err($crate::Error::msg(
@ -140,11 +147,13 @@ macro_rules! ensure {
return $crate::__private::Err($crate::__anyhow!($fmt, $($arg)*));
}
};
}
}
];
#[cfg(not(doc))]
#[macro_export]
macro_rules! ensure {
__ensure![
#[macro_export]
macro_rules! ensure {
($($tt:tt)*) => {
$crate::__parse_ensure!(
/* state */ 0
@ -156,7 +165,8 @@ macro_rules! ensure {
/* rest */ $($tt)*
)
};
}
}
];
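
For context on the structure above (not part of the vendored diff): the doc comment moves into the `__ensure!` helper so a single copy of the documentation can be re-attached to both the `#[cfg(doc)]` and `#[cfg(not(doc))]` definitions of `ensure!`. A compact sketch of the same trick with hypothetical names (`__with_docs`, `demo`):

macro_rules! __with_docs {
    ($item:item) => {
        /// Shared documentation, attached to whichever definition is compiled.
        $item
    };
}

// rustdoc sees a readable, conventional-looking signature...
#[cfg(doc)]
__with_docs! {
    #[macro_export]
    macro_rules! demo {
        ($cond:expr $(,)?) => {
            assert!($cond)
        };
    }
}

// ...while ordinary builds get the real token-munching implementation.
#[cfg(not(doc))]
__with_docs! {
    #[macro_export]
    macro_rules! demo {
        ($($tt:tt)*) => {
            assert!($($tt)*)
        };
    }
}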
/// Construct an ad-hoc error from a string or existing non-`anyhow` error
/// value.

View File

@ -1,6 +1,9 @@
#![allow(
clippy::bool_to_int_with_if,
clippy::char_lit_as_u8,
clippy::deref_addrof,
clippy::diverging_sub_expression,
clippy::erasing_op,
clippy::extra_unused_type_parameters,
clippy::if_same_then_else,
clippy::ifs_same_cond,
@ -16,6 +19,7 @@
clippy::redundant_pattern_matching,
clippy::too_many_lines,
clippy::unit_arg,
clippy::unnecessary_cast,
clippy::while_immutable_condition,
clippy::zero_ptr,
irrefutable_let_patterns
@ -128,6 +132,19 @@ fn test_low_precedence_binary_operator() {
test,
"Condition failed: `while false == true && false {} < ()` (() vs ())",
);
let a = 15;
let b = 3;
let test = || Ok(ensure!(a <= b || a - b <= 10));
assert_err(test, "Condition failed: `a <= b || a - b <= 10`");
}
#[test]
fn test_high_precedence_binary_operator() {
let a = 15;
let b = 3;
let test = || Ok(ensure!(a - b <= 10));
assert_err(test, "Condition failed: `a - b <= 10` (12 vs 10)");
}
#[test]
@ -464,7 +481,9 @@ fn test_trailer() {
fn test_whitespace() {
#[derive(Debug)]
pub struct Point {
#[allow(dead_code)]
pub x: i32,
#[allow(dead_code)]
pub y: i32,
}

View File

@ -3,6 +3,7 @@
clippy::eq_op,
clippy::incompatible_msrv, // https://github.com/rust-lang/rust-clippy/issues/12257
clippy::items_after_statements,
clippy::match_single_binding,
clippy::needless_pass_by_value,
clippy::shadow_unrelated,
clippy::wildcard_imports
@ -11,7 +12,7 @@
mod common;
use self::common::*;
use anyhow::{anyhow, ensure};
use anyhow::{anyhow, ensure, Result};
use std::cell::Cell;
use std::future;
@ -53,6 +54,20 @@ fn test_ensure() {
);
}
#[test]
fn test_ensure_nonbool() -> Result<()> {
struct Struct {
condition: bool,
}
let s = Struct { condition: true };
match &s {
Struct { condition } => ensure!(condition), // &bool
}
Ok(())
}
#[test]
fn test_temporaries() {
fn require_send_sync(_: impl Send + Sync) {}

View File

@ -0,0 +1,40 @@
use anyhow::{ensure, Result};
use std::ops::{Deref, Not};
struct Bool(bool);
struct DerefBool(bool);
struct NotBool(bool);
impl Deref for DerefBool {
type Target = bool;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Not for NotBool {
type Output = bool;
fn not(self) -> Self::Output {
!self.0
}
}
fn main() -> Result<()> {
ensure!("...");
let mut s = Bool(true);
match &mut s {
Bool(cond) => ensure!(cond),
}
let db = DerefBool(true);
ensure!(db);
ensure!(&db);
let nb = NotBool(true);
ensure!(nb);
Ok(())
}

View File

@ -0,0 +1,91 @@
error[E0277]: the trait bound `&str: __private::not::Bool` is not satisfied
--> tests/ui/ensure-nonbool.rs:25:13
|
25 | ensure!("...");
| --------^^^^^-
| | |
| | the trait `__private::not::Bool` is not implemented for `&str`
| required by a bound introduced by this call
|
= help: the following other types implement trait `__private::not::Bool`:
&bool
bool
note: required by a bound in `anyhow::__private::not`
--> src/lib.rs
|
| pub fn not(cond: impl Bool) -> bool {
| ^^^^ required by this bound in `not`
error[E0277]: the trait bound `&mut bool: __private::not::Bool` is not satisfied
--> tests/ui/ensure-nonbool.rs:29:31
|
29 | Bool(cond) => ensure!(cond),
| --------^^^^-
| | |
| | the trait `__private::not::Bool` is not implemented for `&mut bool`
| required by a bound introduced by this call
|
= help: the following other types implement trait `__private::not::Bool`:
&bool
bool
= note: `__private::not::Bool` is implemented for `&bool`, but not for `&mut bool`
note: required by a bound in `anyhow::__private::not`
--> src/lib.rs
|
| pub fn not(cond: impl Bool) -> bool {
| ^^^^ required by this bound in `not`
error[E0277]: the trait bound `DerefBool: __private::not::Bool` is not satisfied
--> tests/ui/ensure-nonbool.rs:33:13
|
33 | ensure!(db);
| --------^^-
| | |
| | the trait `__private::not::Bool` is not implemented for `DerefBool`
| required by a bound introduced by this call
|
= help: the following other types implement trait `__private::not::Bool`:
&bool
bool
note: required by a bound in `anyhow::__private::not`
--> src/lib.rs
|
| pub fn not(cond: impl Bool) -> bool {
| ^^^^ required by this bound in `not`
error[E0277]: the trait bound `&DerefBool: __private::not::Bool` is not satisfied
--> tests/ui/ensure-nonbool.rs:34:13
|
34 | ensure!(&db);
| --------^^^-
| | |
| | the trait `__private::not::Bool` is not implemented for `&DerefBool`
| required by a bound introduced by this call
|
note: required by a bound in `anyhow::__private::not`
--> src/lib.rs
|
| pub fn not(cond: impl Bool) -> bool {
| ^^^^ required by this bound in `not`
help: consider dereferencing here
|
34 | ensure!(&*db);
| +
error[E0277]: the trait bound `NotBool: __private::not::Bool` is not satisfied
--> tests/ui/ensure-nonbool.rs:37:13
|
37 | ensure!(nb);
| --------^^-
| | |
| | the trait `__private::not::Bool` is not implemented for `NotBool`
| required by a bound introduced by this call
|
= help: the following other types implement trait `__private::not::Bool`:
&bool
bool
note: required by a bound in `anyhow::__private::not`
--> src/lib.rs
|
| pub fn not(cond: impl Bool) -> bool {
| ^^^^ required by this bound in `not`

View File

@ -27,6 +27,6 @@ note: the traits `Into` and `std::fmt::Display` must be implemented
= help: items from traits can only be used if the trait is implemented and in scope
= note: the following traits define an item `anyhow_kind`, perhaps you need to implement one of them:
candidate #1: `anyhow::kind::AdhocKind`
candidate #2: `anyhow::kind::TraitKind`
candidate #3: `anyhow::kind::BoxedKind`
candidate #2: `anyhow::kind::BoxedKind`
candidate #3: `anyhow::kind::TraitKind`
= note: this error originates in the macro `anyhow` (in Nightly builds, run with -Z macro-backtrace for more info)

View File

@ -1 +1 @@
{"files":{"Cargo.lock":"400a67d2fb1c1dc7160f7730a36ae05b398af392ac270980f00e87f659e2a6e0","Cargo.toml":"1a4d6915017b2461f8037a320bcc62b424fcb866e77bf4da5dfce1e5a03f84ca","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"27995d58ad5c1145c1a8cd86244ce844886958a35eb2b78c6b772748669999ac","README.md":"62e4f60d7b6f393dd36f06f348560dbbc871de6fb1cbcfcaec34fa1d9527f81b","examples/integers.rs":"589ff4271566dfa322becddf3e2c7b592e6e0bc97b02892ce75619b7e452e930","examples/paths.rs":"1b30e466b824ce8df7ad0a55334424131d9d2573d6cf9f7d5d50c09c8901d526","examples/traits.rs":"cbee6a3e1f7db60b02ae25b714926517144a77cb492021f492774cf0e1865a9e","examples/versions.rs":"38535e6d9f5bfae0de474a3db79a40e8f5da8ba9334c5ff4c363de9bc99d4d12","src/error.rs":"12de7dafea4a35d1dc2f0fa79bfa038386bbbea72bf083979f4ddf227999eeda","src/lib.rs":"07f24132bbed72428d468fddd40b8753b53d57d15d1b31288531672c1a5ab07d","src/tests.rs":"f0e6dc1ad9223c0336c02e215ea3940acb2af6c3bc8fd791e16cd4e786e6a608","src/version.rs":"f1d457d44b6868b4690db795f7b67edfb25c29c6227381b43dcc5452716f2828","tests/no_std.rs":"cc5619466c6e955552f30ed2f80ba8ddf45c3d5d257f628f54dedc0da978f6aa","tests/rustflags.rs":"5c8169b88216055019db61b5d7baf4abdf675e3b14b54f5037bb1e3acd0a5d3f","tests/wrappers.rs":"1ace81fc706f0592647d6aa5bdd854a8e15fa2b6dcd4444334b4a5767ca7fd2c"},"package":"f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80"}
{"files":{"Cargo.lock":"8e60b691c883629c1e19c8dad7c4c18cded1a85c2e8f2e2a77e7a96957f865a7","Cargo.toml":"f8c2d5675773b7ddff8801c3c17ff9d26e9632fe2027a6b81ceb7bda67d71bfd","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"27995d58ad5c1145c1a8cd86244ce844886958a35eb2b78c6b772748669999ac","README.md":"51238410cbbb06b149a794d954ff4d5943da566a97e71dc3aa8b5fa5a9b538b6","examples/integers.rs":"589ff4271566dfa322becddf3e2c7b592e6e0bc97b02892ce75619b7e452e930","examples/nightly.rs":"ac8b5a9aa1e04465e44f5053b3c899b635e07af058c73aa8b45176bf4b5912f9","examples/paths.rs":"1b30e466b824ce8df7ad0a55334424131d9d2573d6cf9f7d5d50c09c8901d526","examples/traits.rs":"cbee6a3e1f7db60b02ae25b714926517144a77cb492021f492774cf0e1865a9e","examples/versions.rs":"38535e6d9f5bfae0de474a3db79a40e8f5da8ba9334c5ff4c363de9bc99d4d12","src/error.rs":"fd8ff67c64f7cd1b9f81325a81de4baa34c39d6ae298bdb33f9829cc91acac39","src/lib.rs":"7b00cd501b52bab797bf047ada2a446e16fc2b8b05567670d8b8b7ada3179f55","src/rustc.rs":"a8a213ddb64a05c1a1af933bcb331a98879e942b167c33d8f94f9f60ebb14e29","src/tests.rs":"594a1cff6fef4a0f8b5f962a668fda4030db5005f37f01eeb06d692fc48a60df","src/version.rs":"4f7d23b36f01c7be1871be86c038d6cb4689e145d67c82d3793690e9aa05b133","tests/no_std.rs":"cc5619466c6e955552f30ed2f80ba8ddf45c3d5d257f628f54dedc0da978f6aa","tests/rustflags.rs":"5c8169b88216055019db61b5d7baf4abdf675e3b14b54f5037bb1e3acd0a5d3f","tests/wrap_ignored":"a9e241edf584a0702066b25bc15c5bbfd8a1019e14fb655fc4f47a67360065ca","tests/wrappers.rs":"e8eb0eb5ac28ecd9e3473b5ddc321b1d4d523a6fb0c072255ac37d40674aa35c"},"package":"0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"}

5
pve-rs/vendor/autocfg/Cargo.lock generated vendored
View File

@ -1,6 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "autocfg"
version = "1.2.0"
version = "1.3.0"

View File

@ -12,7 +12,7 @@
[package]
rust-version = "1.0"
name = "autocfg"
version = "1.2.0"
version = "1.3.0"
authors = ["Josh Stone <cuviper@gmail.com>"]
exclude = ["/.github/**"]
description = "Automatic cfg for Rust compiler features"

View File

@ -43,6 +43,11 @@ should only be used when the compiler supports it.
## Release Notes
- 1.3.0 (2024-05-03)
- Add `probe_raw` for direct control of the code that will be test-compiled.
- Use wrappers when querying the `rustc` version information too.
- 1.2.0 (2024-03-25)
- Add `no_std` and `set_no_std` to control the use of `#![no_std]` in probes.
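
The new `probe_raw` entry is demonstrated by `examples/nightly.rs` just below. The 1.2.0 `no_std`/`set_no_std` knob has no example in this diff, so here is a hedged build-script sketch; the probed path and emitted cfg name are illustrative, and the `set_no_std(true)` call follows the release note above and is assumed to take a `bool`:

// build.rs — forces `#![no_std]` onto every probe, which matters when the
// target has no `std` to link against. Assumes autocfg 1.2 or newer.
extern crate autocfg;

fn main() {
    let mut ac = autocfg::new();
    ac.set_no_std(true);
    // Probe a `core` path so the answer is meaningful without `std`.
    if ac.probe_path("core::convert::identity") {
        autocfg::emit("has_convert_identity"); // illustrative cfg name
    }
}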

View File

@ -0,0 +1,18 @@
extern crate autocfg;
fn main() {
// Normally, cargo will set `OUT_DIR` for build scripts.
let ac = autocfg::AutoCfg::with_dir("target").unwrap();
// When this feature was stabilized, it also renamed the method to
// `chunk_by`, so it's important to *use* the feature in your probe.
let code = r#"
#![feature(slice_group_by)]
pub fn probe(slice: &[i32]) -> impl Iterator<Item = &[i32]> {
slice.group_by(|a, b| a == b)
}
"#;
if ac.probe_raw(code).is_ok() {
autocfg::emit("has_slice_group_by");
}
}

View File

@ -2,6 +2,7 @@ use std::error;
use std::fmt;
use std::io;
use std::num;
use std::process;
use std::str;
/// A common error type for the `autocfg` crate.
@ -20,7 +21,7 @@ impl error::Error for Error {
ErrorKind::Io(ref e) => Some(e),
ErrorKind::Num(ref e) => Some(e),
ErrorKind::Utf8(ref e) => Some(e),
ErrorKind::Other(_) => None,
ErrorKind::Process(_) | ErrorKind::Other(_) => None,
}
}
}
@ -31,6 +32,10 @@ impl fmt::Display for Error {
ErrorKind::Io(ref e) => e.fmt(f),
ErrorKind::Num(ref e) => e.fmt(f),
ErrorKind::Utf8(ref e) => e.fmt(f),
ErrorKind::Process(ref status) => {
// Same message as the newer `ExitStatusError`
write!(f, "process exited unsuccessfully: {}", status)
}
ErrorKind::Other(s) => s.fmt(f),
}
}
@ -40,10 +45,17 @@ impl fmt::Display for Error {
enum ErrorKind {
Io(io::Error),
Num(num::ParseIntError),
Process(process::ExitStatus),
Utf8(str::Utf8Error),
Other(&'static str),
}
pub fn from_exit(status: process::ExitStatus) -> Error {
Error {
kind: ErrorKind::Process(status),
}
}
pub fn from_io(e: io::Error) -> Error {
Error {
kind: ErrorKind::Io(e),

View File

@ -61,10 +61,11 @@ macro_rules! try {
use std::env;
use std::ffi::OsString;
use std::fmt::Arguments;
use std::fs;
use std::io::{stderr, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::process::Stdio;
#[allow(deprecated)]
use std::sync::atomic::ATOMIC_USIZE_INIT;
use std::sync::atomic::{AtomicUsize, Ordering};
@ -72,6 +73,9 @@ use std::sync::atomic::{AtomicUsize, Ordering};
mod error;
pub use error::Error;
mod rustc;
use rustc::Rustc;
mod version;
use version::Version;
@ -82,9 +86,7 @@ mod tests;
#[derive(Clone, Debug)]
pub struct AutoCfg {
out_dir: PathBuf,
rustc: PathBuf,
rustc_wrapper: Option<PathBuf>,
rustc_workspace_wrapper: Option<PathBuf>,
rustc: Rustc,
rustc_version: Version,
target: Option<OsString>,
no_std: bool,
@ -155,9 +157,8 @@ impl AutoCfg {
/// - `dir` is not a writable directory.
///
pub fn with_dir<T: Into<PathBuf>>(dir: T) -> Result<Self, Error> {
let rustc = env::var_os("RUSTC").unwrap_or_else(|| "rustc".into());
let rustc: PathBuf = rustc.into();
let rustc_version = try!(Version::from_rustc(&rustc));
let rustc = Rustc::new();
let rustc_version = try!(rustc.version());
let target = env::var_os("TARGET");
@ -170,8 +171,6 @@ impl AutoCfg {
let mut ac = AutoCfg {
rustflags: rustflags(&target, &dir),
rustc_wrapper: get_rustc_wrapper(false),
rustc_workspace_wrapper: get_rustc_wrapper(true),
out_dir: dir,
rustc: rustc,
rustc_version: rustc_version,
@ -180,11 +179,11 @@ impl AutoCfg {
};
// Sanity check with and without `std`.
if !ac.probe("").unwrap_or(false) {
if !ac.probe_raw("").is_ok() {
if ac.probe_raw("#![no_std]").is_ok() {
ac.no_std = true;
if !ac.probe("").unwrap_or(false) {
} else {
// Neither worked, so assume nothing...
ac.no_std = false;
let warning = b"warning: autocfg could not probe for `std`\n";
stderr().write_all(warning).ok();
}
@ -207,7 +206,7 @@ impl AutoCfg {
///
/// See also [`set_no_std`](#method.set_no_std).
///
/// [prelude]: https://doc.rust-lang.org/reference/crates-and-source-files.html#preludes-and-no_std
/// [prelude]: https://doc.rust-lang.org/reference/names/preludes.html#the-no_std-attribute
pub fn no_std(&self) -> bool {
self.no_std
}
@ -233,23 +232,13 @@ impl AutoCfg {
}
}
fn probe<T: AsRef<[u8]>>(&self, code: T) -> Result<bool, Error> {
fn probe_fmt<'a>(&self, source: Arguments<'a>) -> Result<(), Error> {
#[allow(deprecated)]
static ID: AtomicUsize = ATOMIC_USIZE_INIT;
let id = ID.fetch_add(1, Ordering::Relaxed);
// Build the command with possible wrappers.
let mut rustc = self
.rustc_wrapper
.iter()
.chain(self.rustc_workspace_wrapper.iter())
.chain(Some(&self.rustc));
let mut command = Command::new(rustc.next().unwrap());
for arg in rustc {
command.arg(arg);
}
let mut command = self.rustc.command();
command
.arg("--crate-name")
.arg(format!("probe{}", id))
@ -268,14 +257,69 @@ impl AutoCfg {
let mut child = try!(command.spawn().map_err(error::from_io));
let mut stdin = child.stdin.take().expect("rustc stdin");
if self.no_std {
try!(stdin.write_all(b"#![no_std]\n").map_err(error::from_io));
}
try!(stdin.write_all(code.as_ref()).map_err(error::from_io));
try!(stdin.write_fmt(source).map_err(error::from_io));
drop(stdin);
let status = try!(child.wait().map_err(error::from_io));
Ok(status.success())
match child.wait() {
Ok(status) if status.success() => Ok(()),
Ok(status) => Err(error::from_exit(status)),
Err(error) => Err(error::from_io(error)),
}
}
fn probe<'a>(&self, code: Arguments<'a>) -> bool {
let result = if self.no_std {
self.probe_fmt(format_args!("#![no_std]\n{}", code))
} else {
self.probe_fmt(code)
};
result.is_ok()
}
/// Tests whether the given code can be compiled as a Rust library.
///
/// This will only return `Ok` if the compiler ran and exited successfully,
/// per `ExitStatus::success()`.
/// The code is passed to the compiler exactly as-is, notably not even
/// adding the [`#![no_std]`][Self::no_std] attribute like other probes.
///
/// Raw probes are useful for testing functionality that's not yet covered
/// by the rest of the `AutoCfg` API. For example, the following attribute
/// **must** be used at the crate level, so it wouldn't work within the code
/// templates used by other `probe_*` methods.
///
/// ```
/// # extern crate autocfg;
/// # // Normally, cargo will set `OUT_DIR` for build scripts.
/// # std::env::set_var("OUT_DIR", "target");
/// let ac = autocfg::new();
/// assert!(ac.probe_raw("#![no_builtins]").is_ok());
/// ```
///
/// Rust nightly features could be tested as well -- ideally including a
/// code sample to ensure the unstable feature still works as expected.
/// For example, `slice::group_by` was renamed to `chunk_by` when it was
/// stabilized, even though the feature name was unchanged, so testing the
/// `#![feature(..)]` alone wouldn't reveal that. For larger snippets,
/// [`include_str!`] may be useful to load them from separate files.
///
/// ```
/// # extern crate autocfg;
/// # // Normally, cargo will set `OUT_DIR` for build scripts.
/// # std::env::set_var("OUT_DIR", "target");
/// let ac = autocfg::new();
/// let code = r#"
/// #![feature(slice_group_by)]
/// pub fn probe(slice: &[i32]) -> impl Iterator<Item = &[i32]> {
/// slice.group_by(|a, b| a == b)
/// }
/// "#;
/// if ac.probe_raw(code).is_ok() {
/// autocfg::emit("has_slice_group_by");
/// }
/// ```
pub fn probe_raw(&self, code: &str) -> Result<(), Error> {
self.probe_fmt(format_args!("{}", code))
}
/// Tests whether the given sysroot crate can be used.
@ -286,8 +330,8 @@ impl AutoCfg {
/// extern crate CRATE as probe;
/// ```
pub fn probe_sysroot_crate(&self, name: &str) -> bool {
self.probe(format!("extern crate {} as probe;", name)) // `as _` wasn't stabilized until Rust 1.33
.unwrap_or(false)
// Note: `as _` wasn't stabilized until Rust 1.33
self.probe(format_args!("extern crate {} as probe;", name))
}
/// Emits a config value `has_CRATE` if `probe_sysroot_crate` returns true.
@ -305,7 +349,7 @@ impl AutoCfg {
/// pub use PATH;
/// ```
pub fn probe_path(&self, path: &str) -> bool {
self.probe(format!("pub use {};", path)).unwrap_or(false)
self.probe(format_args!("pub use {};", path))
}
/// Emits a config value `has_PATH` if `probe_path` returns true.
@ -333,8 +377,7 @@ impl AutoCfg {
/// pub trait Probe: TRAIT + Sized {}
/// ```
pub fn probe_trait(&self, name: &str) -> bool {
self.probe(format!("pub trait Probe: {} + Sized {{}}", name))
.unwrap_or(false)
self.probe(format_args!("pub trait Probe: {} + Sized {{}}", name))
}
/// Emits a config value `has_TRAIT` if `probe_trait` returns true.
@ -362,8 +405,7 @@ impl AutoCfg {
/// pub type Probe = TYPE;
/// ```
pub fn probe_type(&self, name: &str) -> bool {
self.probe(format!("pub type Probe = {};", name))
.unwrap_or(false)
self.probe(format_args!("pub type Probe = {};", name))
}
/// Emits a config value `has_TYPE` if `probe_type` returns true.
@ -391,8 +433,7 @@ impl AutoCfg {
/// pub fn probe() { let _ = EXPR; }
/// ```
pub fn probe_expression(&self, expr: &str) -> bool {
self.probe(format!("pub fn probe() {{ let _ = {}; }}", expr))
.unwrap_or(false)
self.probe(format_args!("pub fn probe() {{ let _ = {}; }}", expr))
}
/// Emits the given `cfg` value if `probe_expression` returns true.
@ -410,8 +451,7 @@ impl AutoCfg {
/// pub const PROBE: () = ((), EXPR).0;
/// ```
pub fn probe_constant(&self, expr: &str) -> bool {
self.probe(format!("pub const PROBE: () = ((), {}).0;", expr))
.unwrap_or(false)
self.probe(format_args!("pub const PROBE: () = ((), {}).0;", expr))
}
/// Emits the given `cfg` value if `probe_constant` returns true.
@ -493,27 +533,3 @@ fn rustflags(target: &Option<OsString>, dir: &Path) -> Vec<String> {
Vec::new()
}
fn get_rustc_wrapper(workspace: bool) -> Option<PathBuf> {
// We didn't really know whether the workspace wrapper is applicable until Cargo started
// deliberately setting or unsetting it in rust-lang/cargo#9601. We'll use the encoded
// rustflags as a proxy for that change for now, but we could instead check version 1.55.
if workspace && env::var_os("CARGO_ENCODED_RUSTFLAGS").is_none() {
return None;
}
let name = if workspace {
"RUSTC_WORKSPACE_WRAPPER"
} else {
"RUSTC_WRAPPER"
};
if let Some(wrapper) = env::var_os(name) {
// NB: `OsStr` didn't get `len` or `is_empty` until 1.9.
if wrapper != OsString::new() {
return Some(wrapper.into());
}
}
None
}

89
pve-rs/vendor/autocfg/src/rustc.rs vendored Normal file
View File

@ -0,0 +1,89 @@
use std::env;
use std::ffi::OsString;
use std::path::PathBuf;
use std::process::Command;
use super::error::Error;
use super::version::Version;
#[derive(Clone, Debug)]
pub struct Rustc {
rustc: PathBuf,
rustc_wrapper: Option<PathBuf>,
rustc_workspace_wrapper: Option<PathBuf>,
}
impl Rustc {
pub fn new() -> Self {
Rustc {
rustc: env::var_os("RUSTC")
.unwrap_or_else(|| "rustc".into())
.into(),
rustc_wrapper: get_rustc_wrapper(false),
rustc_workspace_wrapper: get_rustc_wrapper(true),
}
}
/// Build the command with possible wrappers.
pub fn command(&self) -> Command {
let mut rustc = self
.rustc_wrapper
.iter()
.chain(self.rustc_workspace_wrapper.iter())
.chain(Some(&self.rustc));
let mut command = Command::new(rustc.next().unwrap());
for arg in rustc {
command.arg(arg);
}
command
}
/// Try to get the `rustc` version.
pub fn version(&self) -> Result<Version, Error> {
// Some wrappers like clippy-driver don't pass through version commands,
// so we try to fall back to combinations without each wrapper.
macro_rules! try_version {
($command:expr) => {
if let Ok(value) = Version::from_command($command) {
return Ok(value);
}
};
}
let rustc = &self.rustc;
if let Some(ref rw) = self.rustc_wrapper {
if let Some(ref rww) = self.rustc_workspace_wrapper {
try_version!(Command::new(rw).args(&[rww, rustc]));
}
try_version!(Command::new(rw).arg(rustc));
}
if let Some(ref rww) = self.rustc_workspace_wrapper {
try_version!(Command::new(rww).arg(rustc));
}
Version::from_command(&mut Command::new(rustc))
}
}
fn get_rustc_wrapper(workspace: bool) -> Option<PathBuf> {
// We didn't really know whether the workspace wrapper is applicable until Cargo started
// deliberately setting or unsetting it in rust-lang/cargo#9601. We'll use the encoded
// rustflags as a proxy for that change for now, but we could instead check version 1.55.
if workspace && env::var_os("CARGO_ENCODED_RUSTFLAGS").is_none() {
return None;
}
let name = if workspace {
"RUSTC_WORKSPACE_WRAPPER"
} else {
"RUSTC_WRAPPER"
};
if let Some(wrapper) = env::var_os(name) {
// NB: `OsStr` didn't get `len` or `is_empty` until 1.9.
if wrapper != OsString::new() {
return Some(wrapper.into());
}
}
None
}
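
Build scripts never construct `Rustc` themselves; the wrapper-aware version query above simply feeds the existing `probe_rustc_version` API (exercised by the updated `wrappers.rs` test further down). A minimal sketch; the emitted cfg name is illustrative:

// build.rs — `autocfg::new()` reads RUSTC, RUSTC_WRAPPER and
// RUSTC_WORKSPACE_WRAPPER itself, so the reported version is the one seen
// through the same wrapper chain that compiles the probes.
extern crate autocfg;

fn main() {
    let ac = autocfg::new();
    if ac.probe_rustc_version(1, 36) {
        autocfg::emit("rustc_1_36"); // illustrative cfg name
    }
}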

View File

@ -133,6 +133,22 @@ fn probe_constant() {
ac.assert_min(1, 39, ac.probe_constant(r#""test".len()"#));
}
#[test]
fn probe_raw() {
let ac = AutoCfg::for_test().unwrap();
let prefix = if ac.no_std { "#![no_std]\n" } else { "" };
let f = |s| format!("{}{}", prefix, s);
// This attribute **must** be used at the crate level.
assert!(ac.probe_raw(&f("#![no_builtins]")).is_ok());
assert!(ac.probe_raw(&f("#![deny(dead_code)] fn x() {}")).is_err());
assert!(ac.probe_raw(&f("#![allow(dead_code)] fn x() {}")).is_ok());
assert!(ac
.probe_raw(&f("#![deny(dead_code)] pub fn x() {}"))
.is_ok());
}
#[test]
fn dir_does_not_contain_target() {
assert!(!super::dir_contains_target(

View File

@ -1,4 +1,3 @@
use std::path::Path;
use std::process::Command;
use std::str;
@ -22,9 +21,9 @@ impl Version {
}
}
pub fn from_rustc(rustc: &Path) -> Result<Self, Error> {
pub fn from_command(command: &mut Command) -> Result<Self, Error> {
// Get rustc's verbose version
let output = try!(Command::new(rustc)
let output = try!(command
.args(&["--version", "--verbose"])
.output()
.map_err(error::from_io));

12
pve-rs/vendor/autocfg/tests/wrap_ignored vendored Executable file
View File

@ -0,0 +1,12 @@
#!/bin/bash
for arg in "$@"; do
case "$arg" in
# Add our own version so we can check that the wrapper is used for that.
"--version") echo "release: 12345.6789.0" ;;
# Read all input so the writer doesn't get EPIPE when we exit.
"-") read -d "" PROBE ;;
esac
done
exit 0

View File

@ -39,13 +39,18 @@ fn test_wrappers() {
assert!(ac.probe_type("usize"));
assert!(!ac.probe_type("mesize"));
}
// Either way, we should have found the inner rustc version.
assert!(ac.probe_rustc_version(1, 0));
}
}
// Finally, make sure that `RUSTC_WRAPPER` is applied outermost
// by using something that doesn't pass through at all.
env::set_var("RUSTC_WRAPPER", "/bin/true");
env::set_var("RUSTC_WRAPPER", "./tests/wrap_ignored");
env::set_var("RUSTC_WORKSPACE_WRAPPER", "/bin/false");
let ac = autocfg::AutoCfg::new().unwrap();
assert!(ac.probe_type("mesize")); // anything goes!
// Make sure we also got the version from that wrapper.
assert!(ac.probe_rustc_version(12345, 6789));
}

View File

@ -1 +1 @@
{"files":{"Cargo.lock":"499472ec5fb35e1546de201b8644bf8b86d63beb038bdbdec69208cbc0c702df","Cargo.toml":"ee9076154780f56e4fa0b5348f3e3fda28c98868d60d91fee2a61a4c4140ab6d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"66627f6723d9c702276ae529aaf52fc2ed0fd13204ad510d0c9a2d63e6d7d68d","benches/benchmarks.rs":"029b78bb79052ec940eecfd18067b743925189202fc16015d3c4c25b05eb6d67","build.rs":"543e435aa924d7d6923e122f17a02e082bba2a4e874ae36fbfe0dcfa1aafa861","examples/backtrace.rs":"5da0c95ccebfaffbbe7b5a92b0e488017dc375cbb5b8fb2b7712dd65b2dfb2ba","examples/raw.rs":"eda88454164ce5ebdd99c9c70ea35a61d34f5aecfdb8fcb4efd4f9738d10b093","src/backtrace/dbghelp32.rs":"c225ccc1840eb85b5b972645c9de4c662507e70afbbc9a1af720bd20a0db128d","src/backtrace/dbghelp64.rs":"fb12d20ec46ce2620bbbc31aa68a9f24afd9628c8aef8728c0e517c1bed9bff2","src/backtrace/libunwind.rs":"6f8c15a8445bab64e948afdc8d216cddf1a5e2abad3a5240db27edfa75110351","src/backtrace/miri.rs":"c90348fa8c921594cc042bfd2b2b52b7efe6b7b74be9215522339042016b099e","src/backtrace/mod.rs":"fe4346e5a353aab0a40dbb46e4d2653652d8515861f089da40063f4d609b4c83","src/backtrace/noop.rs":"21943fa1f17169ecb1dfff47fc3ee4de7c5669d4abb3fb6f11f6e39845f14a24","src/capture.rs":"ef044cadeb96f18856be9481ed7e7ae57e212da37eb8a33b029a8bb563d633ec","src/dbghelp.rs":"b8c814c2cf4b87d7bf70b9ad0a3f062392d2c2e40b9e08762f334d856f62c20f","src/lib.rs":"4c77af61c4b869ad3d8f41620dceb96d4dfa5c8f77f76dcd038986f2502730cd","src/print.rs":"9d897a980e49e259a62b6d693567d99a43d36c71d69aba9aeb34c8e485fe39cd","src/print/fuchsia.rs":"eb6c02bfffc78dfbc6a30596902636bca869f36aa4a3f891d6b3739327dc9bc4","src/symbolize/dbghelp.rs":"74e56814523cd81e1dcf4b38550b35a00a3f730e6bada349679db8d5c67bc4e6","src/symbolize/gimli.rs":"acfc828c564f58fe75a6a3c8169d16c8f1ab086f0285dfc34ed894337c35b6f2","src/symbolize/gimli/coff.rs":"1a6d69aeb9c79d1acdeb054c1af050251c774d1328ef1a964d08f01c27f2bd2f","src/symbolize/gimli/elf.rs":"1f2628645fdb183f23586b40260abf8c0695a262d26e8999361965307fe32917","src/symbolize/gimli/libs_aix.rs":"bc19376a9735f99b63107fe2a389401be11427f48ddc12636aadeb8180dd1032","src/symbolize/gimli/libs_dl_iterate_phdr.rs":"2fc6a06b9bd6761646e66d247c688862d07ebb7ac44d9a894a09ef37cd246eff","src/symbolize/gimli/libs_haiku.rs":"0a0d4b37145e898f7068cadacccf362d8216e463e7026af2ce38d75ebfd74bea","src/symbolize/gimli/libs_illumos.rs":"4886675501ae3275d14bbfbff9fd9582c7f9c46a0dda2a775fb680e5267c08f7","src/symbolize/gimli/libs_libnx.rs":"8bd076a44460e89a25c2ebd287027de73de43ced2af3aef24ab7a2e21b6f2c90","src/symbolize/gimli/libs_macos.rs":"7f155b9a12ccb4914b2985c9fcaca250b8e3351164d7fbe8a3951acbd3f8cd84","src/symbolize/gimli/libs_windows.rs":"6459f8610ca1a0fd7456539ec604f5276c94b3d0d7331357eaed338e49220a02","src/symbolize/gimli/macho.rs":"3cb44a7ff72d0b8c619463c2346f2cfdc378996bb523ceb0c5ab22b1443035b6","src/symbolize/gimli/mmap_fake.rs":"9564fcf47000e70d521b31518e205c8e6ee09b7410fb1eb1e452721757ff54ba","src/symbolize/gimli/mmap_unix.rs":"7d3d7bc6e5d34e3ecb1fe8b30d36bed404b4b9cd79d3b771c91215abfabb9ad1","src/symbolize/gimli/mmap_windows.rs":"7b90a31abdb6b5e1cf60c2bf5cacc46440976aeca4a91695a20a6ba5b8f80fd1","src/symbolize/gimli/parse_running_mmaps_unix.rs":"b0bffbe15bc55920ecbeb6afe547f81a19a6e428e1a5038d33b8a5a837fe8c3b","src/symbolize/gimli/stash.rs":"e9b4c8b5849fda70c25a40be2f9a16473b601926cf96909087cfff25a8ab42b5","src/symbolize/gimli/xcoff.rs":"b3c70cdca95597375edac968d00f0db6d4f57eb14765bdee68
7b1b12d53b023a","src/symbolize/miri.rs":"f5201cc8a7de24ad3424d2472cb0af59cd28563d09cc0c21e998f4cee4367ade","src/symbolize/mod.rs":"b10f42bcd93d44f374911166dc20083cbb1458e4014d378dbc1da0fa7114de19","src/symbolize/noop.rs":"5d4432079b8ae2b9382945a57ae43df57bb4b7ed2e5956d4167e051a44567388","src/types.rs":"f43c94b99d57ca66a5cfe939a46016c95b2d69d82695fb52480f7a3e5b344fd9","src/windows.rs":"69f4750feb0f586a25c7d0057a281182a62cd402b3a92afe20cad1c925ac27c5","tests/accuracy/auxiliary.rs":"71d2238da401042e007ef5ee20336d6834724bae96d93c8c52d11a5a332d7d34","tests/accuracy/main.rs":"17d215192ebecbe2ab699514e7d83b1caeed069aa2de988a594633d0785a8570","tests/common/mod.rs":"733101288a48cf94d5a87a1957724deaf2650c3e4e8aa0190a4a7db62aa90d01","tests/concurrent-panics.rs":"b60279ad5c4fb9b2754807f35179cbc8fbd7acbe6e92ac6d0f416ae75db38705","tests/current-exe-mismatch.rs":"9cd0711c6d8c332adf8a45a9fef7cce191888e47bd8da60d52c5c7f727df6b98","tests/long_fn_name.rs":"12af8bcef41f2d4f9e2711cbe2a605e15ed47b571fd871f4da1fd159494d779a","tests/sgx-image-base.rs":"564d799ce613569b9d8b65ecf027e01719409fcf3d07c9179f3c7935e364bb41","tests/skip_inner_frames.rs":"073721fe85c8ba64492e4e0ca4f742d538f2d45bdda7461da24fc298aeea69ef","tests/smoke.rs":"7b834549b30df9035845b49692e6ea9b200b7e9d5ca25e54e5c516c6bd047850"},"package":"26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d"}
{"files":{"Cargo.lock":"f3e9673ade6061b3c3714f4cbe700ad55dc76415eee57a49a0f27b506e9c39aa","Cargo.toml":"de058aeed296a6115052eb887ab9c381f45ab7ee0dcf684de6e81dfa8f14cab7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"66627f6723d9c702276ae529aaf52fc2ed0fd13204ad510d0c9a2d63e6d7d68d","benches/benchmarks.rs":"029b78bb79052ec940eecfd18067b743925189202fc16015d3c4c25b05eb6d67","build.rs":"543e435aa924d7d6923e122f17a02e082bba2a4e874ae36fbfe0dcfa1aafa861","examples/backtrace.rs":"5da0c95ccebfaffbbe7b5a92b0e488017dc375cbb5b8fb2b7712dd65b2dfb2ba","examples/raw.rs":"eda88454164ce5ebdd99c9c70ea35a61d34f5aecfdb8fcb4efd4f9738d10b093","src/backtrace/dbghelp32.rs":"e2ff811f32672ff92ba464f641e768a290cd7d1ddccf29651164ea40c60f2dd0","src/backtrace/dbghelp64.rs":"bb19f57b881b65d2aa020b0bc30483cc448f30ca6ad329d745ae65024c56a779","src/backtrace/libunwind.rs":"2dcf3b2b5e0c9e901293d192e7746cf172b17c4e341019f0b37ee2a28e082c86","src/backtrace/miri.rs":"c90348fa8c921594cc042bfd2b2b52b7efe6b7b74be9215522339042016b099e","src/backtrace/mod.rs":"fe4346e5a353aab0a40dbb46e4d2653652d8515861f089da40063f4d609b4c83","src/backtrace/noop.rs":"21943fa1f17169ecb1dfff47fc3ee4de7c5669d4abb3fb6f11f6e39845f14a24","src/capture.rs":"d0b21a3b4d8221f4634c7c5116c0ad6a47bf1319b9b6aee2619dce564283ef75","src/dbghelp.rs":"506ede0fd839a3a86fede18ab264ef51ff10c598ade8112473c76a2c28efc975","src/lib.rs":"69c7d90448b63d621d442021f31e41cec9f42cf48ee4f75021c5fa139771aa2f","src/print.rs":"9d897a980e49e259a62b6d693567d99a43d36c71d69aba9aeb34c8e485fe39cd","src/print/fuchsia.rs":"eb6c02bfffc78dfbc6a30596902636bca869f36aa4a3f891d6b3739327dc9bc4","src/symbolize/dbghelp.rs":"787d2ebcda97c417bc2f63fdd20665839adaed30929a206d05738d31ef525eab","src/symbolize/gimli.rs":"7ca03c2fdad6a85ba05253625565515cd13fb3b0ff9e5c09592ec15fb407fef1","src/symbolize/gimli/coff.rs":"ff0675396fc63bb30822fe1b9def4da044e640e5d31452094738ff31600d6942","src/symbolize/gimli/elf.rs":"1f2628645fdb183f23586b40260abf8c0695a262d26e8999361965307fe32917","src/symbolize/gimli/libs_aix.rs":"bc19376a9735f99b63107fe2a389401be11427f48ddc12636aadeb8180dd1032","src/symbolize/gimli/libs_dl_iterate_phdr.rs":"2fc6a06b9bd6761646e66d247c688862d07ebb7ac44d9a894a09ef37cd246eff","src/symbolize/gimli/libs_haiku.rs":"0a0d4b37145e898f7068cadacccf362d8216e463e7026af2ce38d75ebfd74bea","src/symbolize/gimli/libs_illumos.rs":"4886675501ae3275d14bbfbff9fd9582c7f9c46a0dda2a775fb680e5267c08f7","src/symbolize/gimli/libs_libnx.rs":"8bd076a44460e89a25c2ebd287027de73de43ced2af3aef24ab7a2e21b6f2c90","src/symbolize/gimli/libs_macos.rs":"7f155b9a12ccb4914b2985c9fcaca250b8e3351164d7fbe8a3951acbd3f8cd84","src/symbolize/gimli/libs_windows.rs":"6459f8610ca1a0fd7456539ec604f5276c94b3d0d7331357eaed338e49220a02","src/symbolize/gimli/macho.rs":"d75cb9c2a3640031d5269425ec4fcc4f21f364d4406cac50ef6c5eb9a718fc2d","src/symbolize/gimli/mmap_fake.rs":"9564fcf47000e70d521b31518e205c8e6ee09b7410fb1eb1e452721757ff54ba","src/symbolize/gimli/mmap_unix.rs":"7d3d7bc6e5d34e3ecb1fe8b30d36bed404b4b9cd79d3b771c91215abfabb9ad1","src/symbolize/gimli/mmap_windows.rs":"7b90a31abdb6b5e1cf60c2bf5cacc46440976aeca4a91695a20a6ba5b8f80fd1","src/symbolize/gimli/parse_running_mmaps_unix.rs":"b0bffbe15bc55920ecbeb6afe547f81a19a6e428e1a5038d33b8a5a837fe8c3b","src/symbolize/gimli/stash.rs":"e9b4c8b5849fda70c25a40be2f9a16473b601926cf96909087cfff25a8ab42b5","src/symbolize/gimli/xcoff.rs":"b3c70cdca95597375edac968d00f0db6d4f57eb14765bdee68
7b1b12d53b023a","src/symbolize/miri.rs":"f5201cc8a7de24ad3424d2472cb0af59cd28563d09cc0c21e998f4cee4367ade","src/symbolize/mod.rs":"e20189e4e6c453bff4fa8fbea99d96eb9e05d524deef656dd2ac9a148b87d885","src/symbolize/noop.rs":"5d4432079b8ae2b9382945a57ae43df57bb4b7ed2e5956d4167e051a44567388","src/types.rs":"f43c94b99d57ca66a5cfe939a46016c95b2d69d82695fb52480f7a3e5b344fd9","src/windows.rs":"c0eef986a3d733747d1e0ad12850efd022430c04131b4114b69100de380ad400","tests/accuracy/auxiliary.rs":"71d2238da401042e007ef5ee20336d6834724bae96d93c8c52d11a5a332d7d34","tests/accuracy/main.rs":"851778c046bc5b51f91777716ffe9896e0f872197d069e2dcd9a8b5ef4c98b01","tests/common/mod.rs":"733101288a48cf94d5a87a1957724deaf2650c3e4e8aa0190a4a7db62aa90d01","tests/concurrent-panics.rs":"b60279ad5c4fb9b2754807f35179cbc8fbd7acbe6e92ac6d0f416ae75db38705","tests/current-exe-mismatch.rs":"9cd0711c6d8c332adf8a45a9fef7cce191888e47bd8da60d52c5c7f727df6b98","tests/long_fn_name.rs":"12af8bcef41f2d4f9e2711cbe2a605e15ed47b571fd871f4da1fd159494d779a","tests/sgx-image-base.rs":"564d799ce613569b9d8b65ecf027e01719409fcf3d07c9179f3c7935e364bb41","tests/skip_inner_frames.rs":"073721fe85c8ba64492e4e0ca4f742d538f2d45bdda7461da24fc298aeea69ef","tests/smoke.rs":"ad0a1894fc4922f03fe01b61e5fc563d2c9fd50a46b25faafc42c11966a791d6"},"package":"5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a"}

29
pve-rs/vendor/backtrace/Cargo.lock generated vendored
View File

@ -4,9 +4,9 @@ version = 3
[[package]]
name = "addr2line"
version = "0.21.0"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678"
dependencies = [
"gimli",
]
@ -19,7 +19,7 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "backtrace"
version = "0.3.71"
version = "0.3.73"
dependencies = [
"addr2line",
"cc",
@ -30,16 +30,15 @@ dependencies = [
"miniz_oxide",
"object",
"rustc-demangle",
"rustc-serialize",
"serde",
"winapi",
]
[[package]]
name = "cc"
version = "1.0.90"
version = "1.0.97"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5"
checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4"
[[package]]
name = "cfg-if"
@ -58,9 +57,9 @@ dependencies = [
[[package]]
name = "gimli"
version = "0.28.0"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd"
[[package]]
name = "libc"
@ -95,9 +94,9 @@ dependencies = [
[[package]]
name = "object"
version = "0.32.0"
version = "0.36.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77ac5bbd07aea88c60a577a1ce218075ffd59208b2d7ca97adf9bfc5aeb21ebe"
checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434"
dependencies = [
"memchr",
]
@ -122,15 +121,9 @@ dependencies = [
[[package]]
name = "rustc-demangle"
version = "0.1.23"
version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustc-serialize"
version = "0.3.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe834bc780604f4674073badbad26d7219cadfb4a2275802db12cbae17498401"
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
[[package]]
name = "serde"

View File

@ -13,12 +13,14 @@
edition = "2021"
rust-version = "1.65.0"
name = "backtrace"
version = "0.3.71"
version = "0.3.73"
authors = ["The Rust Project Developers"]
build = "build.rs"
exclude = ["/ci/"]
autoexamples = true
autotests = true
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = """
A library to acquire a stack trace (backtrace) at runtime in a Rust program.
"""
@ -28,42 +30,62 @@ readme = "README.md"
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-lang/backtrace-rs"
[lib]
name = "backtrace"
path = "src/lib.rs"
[[example]]
name = "backtrace"
path = "examples/backtrace.rs"
required-features = ["std"]
[[example]]
name = "raw"
path = "examples/raw.rs"
required-features = ["std"]
[[test]]
name = "skip_inner_frames"
path = "tests/skip_inner_frames.rs"
required-features = ["std"]
[[test]]
name = "long_fn_name"
path = "tests/long_fn_name.rs"
required-features = ["std"]
[[test]]
name = "smoke"
path = "tests/smoke.rs"
required-features = ["std"]
edition = "2021"
[[test]]
name = "accuracy"
path = "tests/accuracy/main.rs"
required-features = ["std"]
edition = "2021"
[[test]]
name = "concurrent-panics"
path = "tests/concurrent-panics.rs"
harness = false
required-features = ["std"]
[[test]]
name = "current-exe-mismatch"
path = "tests/current-exe-mismatch.rs"
harness = false
required-features = ["std"]
[[test]]
name = "sgx-image-base"
path = "tests/sgx-image-base.rs"
[[bench]]
name = "benchmarks"
path = "benches/benchmarks.rs"
[dependencies.cfg-if]
version = "1.0"
@ -74,11 +96,7 @@ optional = true
default-features = false
[dependencies.rustc-demangle]
version = "0.1.4"
[dependencies.rustc-serialize]
version = "0.3"
optional = true
version = "0.1.24"
[dependencies.serde]
version = "1.0"
@ -89,18 +107,16 @@ optional = true
version = "0.7"
[build-dependencies.cc]
version = "1.0.90"
version = "1.0.97"
[features]
coresymbolication = []
dbghelp = []
default = ["std"]
dl_iterate_phdr = []
dladdr = []
gimli-symbolize = []
kernel32 = []
libbacktrace = []
libunwind = []
serialize-rustc = ["rustc-serialize"]
serialize-serde = ["serde"]
std = []
unix-backtrace = []
@ -115,10 +131,12 @@ verify-winapi = [
"winapi/tlhelp32",
"winapi/winbase",
"winapi/winnt",
"winapi/winnls",
"winapi/stringapiset",
]
[target."cfg(not(all(windows, target_env = \"msvc\", not(target_vendor = \"uwp\"))))".dependencies.addr2line]
version = "0.21.0"
version = "0.22.0"
default-features = false
[target."cfg(not(all(windows, target_env = \"msvc\", not(target_vendor = \"uwp\"))))".dependencies.libc]
@ -130,7 +148,7 @@ version = "0.7.0"
default-features = false
[target."cfg(not(all(windows, target_env = \"msvc\", not(target_vendor = \"uwp\"))))".dependencies.object]
version = "0.32.0"
version = "0.36.0"
features = [
"read_core",
"elf",
@ -145,3 +163,6 @@ default-features = false
[target."cfg(windows)".dependencies.winapi]
version = "0.3.9"
optional = true
[lints.rust]
unexpected_cfgs = "allow"

View File

@ -1,6 +1,6 @@
//! Backtrace strategy for MSVC platforms.
//! Backtrace strategy for Windows platforms.
//!
//! This module contains the ability to generate a backtrace on MSVC using one
//! This module contains the ability to generate a backtrace on Windows using one
//! of two possible methods. The `StackWalkEx` function is primarily used if
//! possible, but not all systems have that. Failing that the `StackWalk64`
//! function is used instead. Note that `StackWalkEx` is favored because it

View File

@ -1,19 +1,10 @@
//! Backtrace strategy for MSVC platforms.
//! Backtrace strategy for Windows `x86_64` and `aarch64` platforms.
//!
//! This module contains the ability to capture a backtrace on MSVC using one
//! of three possible methods. For `x86_64` and `aarch64`, we use `RtlVirtualUnwind`
//! to walk the stack one frame at a time. This function is much faster than using
//! This module contains the ability to capture a backtrace on Windows using
//! `RtlVirtualUnwind` to walk the stack one frame at a time. This function is much faster than using
//! `dbghelp!StackWalk*` because it does not load debug info to report inlined frames.
//! We still report inlined frames during symbolization by consulting the appropriate
//! `dbghelp` functions.
//!
//! For all other platforms, primarily `i686`, the `StackWalkEx` function is used if
//! possible, but not all systems have that. Failing that the `StackWalk64` function
//! is used instead. Note that `StackWalkEx` is favored because it handles debuginfo
//! internally and returns inline frame information.
//!
//! Note that all dbghelp support is loaded dynamically, see `src/dbghelp.rs`
//! for more information about that.
#![allow(bad_style)]
@ -95,44 +86,66 @@ impl MyContext {
pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) {
use core::ptr;
// Capture the initial context to start walking from.
let mut context = core::mem::zeroed::<MyContext>();
RtlCaptureContext(&mut context.0);
// Call `RtlVirtualUnwind` to find the previous stack frame, walking until we hit ip = 0.
while context.ip() != 0 {
let mut base = 0;
loop {
let ip = context.ip();
let fn_entry = RtlLookupFunctionEntry(context.ip(), &mut base, ptr::null_mut());
// The base address of the module containing the function will be stored here
// when RtlLookupFunctionEntry returns successfully.
let mut base = 0;
let fn_entry = RtlLookupFunctionEntry(ip, &mut base, ptr::null_mut());
if fn_entry.is_null() {
// No function entry could be found - this may indicate a corrupt
// stack or that a binary was unloaded (amongst other issues). Stop
// walking and don't call the callback as we can't be confident in
// this frame or the rest of the stack.
break;
}
let frame = super::Frame {
inner: Frame {
base_address: fn_entry.cast::<c_void>(),
ip: context.ip() as *mut c_void,
base_address: base as *mut c_void,
ip: ip as *mut c_void,
sp: context.sp() as *mut c_void,
#[cfg(not(target_env = "gnu"))]
inline_context: None,
},
};
// We've loaded all the info about the current frame, so now call the
// callback.
if !cb(&frame) {
// Callback told us to stop, so we're done.
break;
}
// Unwind to the next frame.
let previous_ip = ip;
let previous_sp = context.sp();
let mut handler_data = 0usize;
let mut establisher_frame = 0;
RtlVirtualUnwind(
0,
base,
context.ip(),
ip,
fn_entry,
&mut context.0,
ptr::addr_of_mut!(handler_data).cast::<PVOID>(),
&mut establisher_frame,
ptr::null_mut(),
);
// RtlVirtualUnwind indicates the end of the stack in two different ways:
// * On x64, it sets the instruction pointer to 0.
// * On ARM64, it leaves the context unchanged (easiest way to check is
// to see if the instruction and stack pointers are the same).
// If we detect either of these, then unwinding is completed.
let ip = context.ip();
if ip == 0 || (ip == previous_ip && context.sp() == previous_sp) {
break;
}
}
}

View File

@ -15,7 +15,6 @@
//!
//! This is the default unwinding API for all non-Windows platforms currently.
use super::super::Bomb;
use core::ffi::c_void;
use core::ptr::addr_of_mut;
@ -100,6 +99,18 @@ impl Clone for Frame {
}
}
struct Bomb {
enabled: bool,
}
impl Drop for Bomb {
fn drop(&mut self) {
if self.enabled {
panic!("cannot panic during the backtrace function");
}
}
}
#[inline(always)]
pub unsafe fn trace(mut cb: &mut dyn FnMut(&super::Frame) -> bool) {
uw::_Unwind_Backtrace(trace_fn, addr_of_mut!(cb).cast());
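The `Bomb` guard moved into this module (above) exists to keep user panics from unwinding through foreign stack frames. As a minimal usage sketch under that assumption (the helper name is hypothetical, not taken from the crate), the guard is armed before invoking user code and defused only after it returns:
```rust
// Hypothetical helper illustrating the Bomb pattern shown above.
fn call_user_callback(cb: &mut dyn FnMut() -> bool) -> bool {
    // Armed: if `cb` panics, `bomb` is dropped while still enabled and its
    // Drop impl panics again, turning the unwind into an abort instead of
    // letting it cross the libunwind C frames.
    let mut bomb = Bomb { enabled: true };
    let keep_going = cb();
    // Returned normally, so defuse the guard before it is dropped.
    bomb.enabled = false;
    keep_going
}
```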

View File

@ -1,5 +1,7 @@
#[cfg(feature = "serde")]
use crate::resolve;
use crate::PrintFmt;
use crate::{resolve, resolve_frame, trace, BacktraceFmt, Symbol, SymbolName};
use crate::{resolve_frame, trace, BacktraceFmt, Symbol, SymbolName};
use std::ffi::c_void;
use std::fmt;
use std::path::{Path, PathBuf};
@ -21,7 +23,6 @@ use serde::{Deserialize, Serialize};
/// This function requires the `std` feature of the `backtrace` crate to be
/// enabled, and the `std` feature is enabled by default.
#[derive(Clone)]
#[cfg_attr(feature = "serialize-rustc", derive(RustcDecodable, RustcEncodable))]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
pub struct Backtrace {
// Frames here are listed from top-to-bottom of the stack
@ -51,7 +52,7 @@ pub struct BacktraceFrame {
#[derive(Clone)]
enum Frame {
Raw(crate::Frame),
#[allow(dead_code)]
#[cfg(feature = "serde")]
Deserialized {
ip: usize,
symbol_address: usize,
@ -63,6 +64,7 @@ impl Frame {
fn ip(&self) -> *mut c_void {
match *self {
Frame::Raw(ref f) => f.ip(),
#[cfg(feature = "serde")]
Frame::Deserialized { ip, .. } => ip as *mut c_void,
}
}
@ -70,6 +72,7 @@ impl Frame {
fn symbol_address(&self) -> *mut c_void {
match *self {
Frame::Raw(ref f) => f.symbol_address(),
#[cfg(feature = "serde")]
Frame::Deserialized { symbol_address, .. } => symbol_address as *mut c_void,
}
}
@ -77,6 +80,7 @@ impl Frame {
fn module_base_address(&self) -> Option<*mut c_void> {
match *self {
Frame::Raw(ref f) => f.module_base_address(),
#[cfg(feature = "serde")]
Frame::Deserialized {
module_base_address,
..
@ -98,6 +102,7 @@ impl Frame {
};
match *self {
Frame::Raw(ref f) => resolve_frame(f, sym),
#[cfg(feature = "serde")]
Frame::Deserialized { ip, .. } => {
resolve(ip as *mut c_void, sym);
}
@ -116,7 +121,6 @@ impl Frame {
/// This function requires the `std` feature of the `backtrace` crate to be
/// enabled, and the `std` feature is enabled by default.
#[derive(Clone)]
#[cfg_attr(feature = "serialize-rustc", derive(RustcDecodable, RustcEncodable))]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
pub struct BacktraceSymbol {
name: Option<Vec<u8>>,
@ -440,53 +444,6 @@ impl fmt::Debug for BacktraceSymbol {
}
}
#[cfg(feature = "serialize-rustc")]
mod rustc_serialize_impls {
use super::*;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
#[derive(RustcEncodable, RustcDecodable)]
struct SerializedFrame {
ip: usize,
symbol_address: usize,
module_base_address: Option<usize>,
symbols: Option<Vec<BacktraceSymbol>>,
}
impl Decodable for BacktraceFrame {
fn decode<D>(d: &mut D) -> Result<Self, D::Error>
where
D: Decoder,
{
let frame: SerializedFrame = SerializedFrame::decode(d)?;
Ok(BacktraceFrame {
frame: Frame::Deserialized {
ip: frame.ip,
symbol_address: frame.symbol_address,
module_base_address: frame.module_base_address,
},
symbols: frame.symbols,
})
}
}
impl Encodable for BacktraceFrame {
fn encode<E>(&self, e: &mut E) -> Result<(), E::Error>
where
E: Encoder,
{
let BacktraceFrame { frame, symbols } = self;
SerializedFrame {
ip: frame.ip() as usize,
symbol_address: frame.symbol_address() as usize,
module_base_address: frame.module_base_address().map(|addr| addr as usize),
symbols: symbols.clone(),
}
.encode(e)
}
}
}
#[cfg(feature = "serde")]
mod serde_impls {
use super::*;

View File

@ -376,16 +376,21 @@ pub fn init() -> Result<Init, ()> {
DBGHELP.ensure_open()?;
static mut INITIALIZED: bool = false;
if INITIALIZED {
return Ok(ret);
if !INITIALIZED {
set_optional_options();
INITIALIZED = true;
}
let orig = DBGHELP.SymGetOptions().unwrap()();
Ok(ret)
}
}
fn set_optional_options() -> Option<()> {
unsafe {
let orig = DBGHELP.SymGetOptions()?();
// Ensure that the `SYMOPT_DEFERRED_LOADS` flag is set, because
// according to MSVC's own docs about this: "This is the fastest, most
// efficient way to use the symbol handler.", so let's do that!
DBGHELP.SymSetOptions().unwrap()(orig | SYMOPT_DEFERRED_LOADS);
DBGHELP.SymSetOptions()?(orig | SYMOPT_DEFERRED_LOADS);
// Actually initialize symbols with MSVC. Note that this can fail, but we
// ignore it. There's not a ton of prior art for this per se, but LLVM
@ -399,7 +404,7 @@ pub fn init() -> Result<Init, ()> {
// the time, but now that it's using this crate it means that someone will
// get to initialization first and the other will pick up that
// initialization.
DBGHELP.SymInitializeW().unwrap()(GetCurrentProcess(), ptr::null_mut(), TRUE);
DBGHELP.SymInitializeW()?(GetCurrentProcess(), ptr::null_mut(), TRUE);
// The default search path for dbghelp will only look in the current working
// directory and (possibly) `_NT_SYMBOL_PATH` and `_NT_ALT_SYMBOL_PATH`.
@ -413,7 +418,7 @@ pub fn init() -> Result<Init, ()> {
search_path_buf.resize(1024, 0);
// Prefill the buffer with the current search path.
if DBGHELP.SymGetSearchPathW().unwrap()(
if DBGHELP.SymGetSearchPathW()?(
GetCurrentProcess(),
search_path_buf.as_mut_ptr(),
search_path_buf.len() as _,
@ -433,7 +438,7 @@ pub fn init() -> Result<Init, ()> {
let mut search_path = SearchPath::new(search_path_buf);
// Update the search path to include the directory of the executable and each DLL.
DBGHELP.EnumerateLoadedModulesW64().unwrap()(
DBGHELP.EnumerateLoadedModulesW64()?(
GetCurrentProcess(),
Some(enum_loaded_modules_callback),
((&mut search_path) as *mut SearchPath) as *mut c_void,
@ -442,11 +447,9 @@ pub fn init() -> Result<Init, ()> {
let new_search_path = search_path.finalize();
// Set the new search path.
DBGHELP.SymSetSearchPathW().unwrap()(GetCurrentProcess(), new_search_path.as_ptr());
INITIALIZED = true;
Ok(ret)
DBGHELP.SymSetSearchPathW()?(GetCurrentProcess(), new_search_path.as_ptr());
}
Some(())
}
struct SearchPath {

View File

@ -97,8 +97,6 @@
// irrelevant as this crate is developed out-of-tree.
#![cfg_attr(backtrace_in_libstd, allow(warnings))]
#![cfg_attr(not(feature = "std"), allow(dead_code))]
// We know this is deprecated, it's only here for back-compat reasons.
#![cfg_attr(feature = "rustc-serialize", allow(deprecated))]
#[cfg(feature = "std")]
#[macro_use]
@ -140,21 +138,6 @@ cfg_if::cfg_if! {
}
}
#[allow(dead_code)]
struct Bomb {
enabled: bool,
}
#[allow(dead_code)]
impl Drop for Bomb {
fn drop(&mut self) {
if self.enabled {
panic!("cannot panic during the backtrace function");
}
}
}
#[allow(dead_code)]
#[cfg(feature = "std")]
mod lock {
use std::boxed::Box;
@ -162,32 +145,95 @@ mod lock {
use std::ptr;
use std::sync::{Mutex, MutexGuard, Once};
/// A "Maybe" LockGuard
pub struct LockGuard(Option<MutexGuard<'static, ()>>);
/// The global lock, lazily allocated on first use
static mut LOCK: *mut Mutex<()> = ptr::null_mut();
static INIT: Once = Once::new();
// Whether this thread is the one that holds the lock
thread_local!(static LOCK_HELD: Cell<bool> = Cell::new(false));
impl Drop for LockGuard {
fn drop(&mut self) {
// Don't do anything if we're a LockGuard(None)
if self.0.is_some() {
LOCK_HELD.with(|slot| {
// Immediately crash if we somehow aren't the thread holding this lock
assert!(slot.get());
// We are no longer the thread holding this lock
slot.set(false);
});
}
// lock implicitly released here, if we're a LockGuard(Some(..))
}
}
/// Acquire a partially unsound(!!!) global re-entrant lock over
/// backtrace's internals.
///
/// That is, this lock can be acquired as many times as you want
/// on a single thread without deadlocking, allowing one thread
/// to acquire exclusive access to the ability to make backtraces.
/// Calls to this locking function are freely sprinkled in every place
/// where that needs to be enforced.
///
///
/// # Why
///
/// This was first introduced to guard uses of Windows' dbghelp API,
/// which isn't threadsafe. It's unclear if other things now rely on
/// this locking.
///
///
/// # How
///
/// The basic idea is to have a single global mutex, and a thread_local
/// boolean saying "yep this is the thread that acquired the mutex".
///
/// The first time a thread acquires the lock, it is handed a
/// `LockGuard(Some(..))` that will actually release the lock on Drop.
/// All subsequent attempts to lock on the same thread will see
/// that their thread acquired the lock, and get `LockGuard(None)`
/// which will do nothing when dropped.
///
///
/// # Safety
///
/// As long as you only ever assign the returned LockGuard to a freshly
/// declared local variable, it will do its job correctly, as the "first"
/// LockGuard will strictly outlive all subsequent LockGuards and
/// properly release the lock when the thread is done with backtracing.
///
/// However if you ever attempt to store a LockGuard beyond the scope
/// it was acquired in, it might actually be a `LockGuard(None)` that
/// doesn't actually hold the lock! In this case another thread might
/// acquire the lock and you'll get races this system was intended to
/// avoid!
///
/// This is why this is "partially unsound". As a public API this would
/// be unacceptable, but this is crate-private, and if you use this in
/// the most obvious and simplistic way it Just Works™.
///
/// Note however that std specifically bypasses this lock, and uses
/// the `*_unsynchronized` backtrace APIs. This is "fine" because
/// it wraps its own calls to backtrace in a non-reentrant Mutex
/// that prevents two backtraces from getting interleaved during printing.
pub fn lock() -> LockGuard {
// If we're the thread holding this lock, pretend to acquire the lock
// again by returning a LockGuard(None)
if LOCK_HELD.with(|l| l.get()) {
return LockGuard(None);
}
// Insist that we totally are the thread holding the lock
// (our thread will block until we are)
LOCK_HELD.with(|s| s.set(true));
unsafe {
// lazily allocate the lock if necessary
INIT.call_once(|| {
LOCK = Box::into_raw(Box::new(Mutex::new(())));
});
// ok *actually* try to acquire the lock, blocking as necessary
LockGuard(Some((*LOCK).lock().unwrap()))
}
}
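As the comment above stresses, callers are expected to bind the guard to a freshly declared local. A minimal sketch of such a call site (the function name is hypothetical; only `lock()` comes from the module above):
```rust
// Hypothetical caller living next to the `lock` module above.
fn with_backtrace_lock<T>(f: impl FnOnce() -> T) -> T {
    // First call on this thread: LockGuard(Some(..)) that really holds the mutex.
    // Re-entrant calls further down the stack: LockGuard(None), which is a no-op,
    // so the same thread never deadlocks against itself.
    let _guard = lock::lock();
    f()
}
```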

View File

@ -19,7 +19,6 @@
use super::super::{dbghelp, windows::*};
use super::{BytesOrWideString, ResolveWhat, SymbolName};
use core::char;
use core::ffi::c_void;
use core::marker;
use core::mem;
@ -91,7 +90,7 @@ pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol))
ResolveWhat::Frame(frame) => {
resolve_with_inline(&dbghelp, frame.ip(), frame.inner.inline_context(), cb)
}
}
};
}
#[cfg(target_vendor = "win7")]
@ -116,7 +115,7 @@ pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol))
ResolveWhat::Frame(frame) => {
resolve_inner(&dbghelp, frame.ip(), frame.inner.inline_context(), cb)
}
}
};
}
/// Resolve the address using the legacy dbghelp API.
@ -129,13 +128,14 @@ unsafe fn resolve_legacy(
addr: *mut c_void,
_inline_context: Option<DWORD>,
cb: &mut dyn FnMut(&super::Symbol),
) {
) -> Option<()> {
let addr = super::adjust_ip(addr) as DWORD64;
do_resolve(
|info| dbghelp.SymFromAddrW()(GetCurrentProcess(), addr, &mut 0, info),
|line| dbghelp.SymGetLineFromAddrW64()(GetCurrentProcess(), addr, &mut 0, line),
cb,
)
);
Some(())
}
/// Resolve the address using the modern dbghelp APIs.
@ -147,22 +147,28 @@ unsafe fn resolve_with_inline(
addr: *mut c_void,
inline_context: Option<DWORD>,
cb: &mut dyn FnMut(&super::Symbol),
) {
) -> Option<()> {
let current_process = GetCurrentProcess();
// Ensure we have the functions we need. Return if any aren't found.
let SymFromInlineContextW = (*dbghelp.dbghelp()).SymFromInlineContextW()?;
let SymGetLineFromInlineContextW = (*dbghelp.dbghelp()).SymGetLineFromInlineContextW()?;
let addr = super::adjust_ip(addr) as DWORD64;
let (inlined_frame_count, inline_context) = if let Some(ic) = inline_context {
(0, ic)
} else {
let mut inlined_frame_count = dbghelp.SymAddrIncludeInlineTrace()(current_process, addr);
let SymAddrIncludeInlineTrace = (*dbghelp.dbghelp()).SymAddrIncludeInlineTrace()?;
let SymQueryInlineTrace = (*dbghelp.dbghelp()).SymQueryInlineTrace()?;
let mut inlined_frame_count = SymAddrIncludeInlineTrace(current_process, addr);
let mut inline_context = 0;
// If there are inlined frames but we can't load them for some reason OR if there are no
// inlined frames, then we disregard inlined_frame_count and inline_context.
if (inlined_frame_count > 0
&& dbghelp.SymQueryInlineTrace()(
&& SymQueryInlineTrace(
current_process,
addr,
0,
@ -184,22 +190,14 @@ unsafe fn resolve_with_inline(
for inline_context in inline_context..last_inline_context {
do_resolve(
|info| {
dbghelp.SymFromInlineContextW()(current_process, addr, inline_context, &mut 0, info)
},
|info| SymFromInlineContextW(current_process, addr, inline_context, &mut 0, info),
|line| {
dbghelp.SymGetLineFromInlineContextW()(
current_process,
addr,
inline_context,
0,
&mut 0,
line,
)
SymGetLineFromInlineContextW(current_process, addr, inline_context, 0, &mut 0, line)
},
cb,
);
}
Some(())
}
unsafe fn do_resolve(
@ -225,26 +223,27 @@ unsafe fn do_resolve(
// the real value.
let name_len = ::core::cmp::min(info.NameLen as usize, info.MaxNameLen as usize - 1);
let name_ptr = info.Name.as_ptr().cast::<u16>();
let name = slice::from_raw_parts(name_ptr, name_len);
// Reencode the utf-16 symbol to utf-8 so we can use `SymbolName::new` like
// all other platforms
let mut name_len = 0;
let mut name_buffer = [0; 256];
{
let mut remaining = &mut name_buffer[..];
for c in char::decode_utf16(name.iter().cloned()) {
let c = c.unwrap_or(char::REPLACEMENT_CHARACTER);
let len = c.len_utf8();
if len < remaining.len() {
c.encode_utf8(remaining);
let tmp = remaining;
remaining = &mut tmp[len..];
name_len += len;
} else {
break;
}
}
let mut name_buffer = [0_u8; 256];
let mut name_len = WideCharToMultiByte(
CP_UTF8,
0,
name_ptr,
name_len as i32,
name_buffer.as_mut_ptr().cast::<i8>(),
name_buffer.len() as i32,
core::ptr::null_mut(),
core::ptr::null_mut(),
) as usize;
if name_len == 0 {
// If the returned length is zero that means the buffer wasn't big enough.
// However, the buffer will be filled with as much as will fit.
name_len = name_buffer.len();
} else if name_len > name_buffer.len() {
// This can't happen.
return;
}
let name = ptr::addr_of!(name_buffer[..name_len]);

View File

@ -30,15 +30,16 @@ cfg_if::cfg_if! {
if #[cfg(windows)] {
#[path = "gimli/mmap_windows.rs"]
mod mmap;
} else if #[cfg(target_vendor = "apple")] {
#[path = "gimli/mmap_unix.rs"]
mod mmap;
} else if #[cfg(any(
target_os = "android",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "haiku",
target_os = "hurd",
target_os = "ios",
target_os = "linux",
target_os = "macos",
target_os = "openbsd",
target_os = "solaris",
target_os = "illumos",
@ -195,12 +196,7 @@ cfg_if::cfg_if! {
if #[cfg(windows)] {
mod coff;
use self::coff::{handle_split_dwarf, Object};
} else if #[cfg(any(
target_os = "macos",
target_os = "ios",
target_os = "tvos",
target_os = "watchos",
))] {
} else if #[cfg(any(target_vendor = "apple"))] {
mod macho;
use self::macho::{handle_split_dwarf, Object};
} else if #[cfg(target_os = "aix")] {
@ -216,12 +212,7 @@ cfg_if::cfg_if! {
if #[cfg(windows)] {
mod libs_windows;
use libs_windows::native_libraries;
} else if #[cfg(any(
target_os = "macos",
target_os = "ios",
target_os = "tvos",
target_os = "watchos",
))] {
} else if #[cfg(target_vendor = "apple")] {
mod libs_macos;
use libs_macos::native_libraries;
} else if #[cfg(target_os = "illumos")] {
@ -472,10 +463,7 @@ pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol))
}
if !any_frames {
if let Some(name) = cx.object.search_symtab(addr as u64) {
call(Symbol::Symtab {
addr: addr as *mut c_void,
name,
});
call(Symbol::Symtab { name });
}
}
});
@ -491,7 +479,7 @@ pub enum Symbol<'a> {
},
/// Couldn't find debug information, but we found it in the symbol table of
/// the elf executable.
Symtab { addr: *mut c_void, name: &'a [u8] },
Symtab { name: &'a [u8] },
}
impl Symbol<'_> {

View File

@ -51,19 +51,15 @@ impl<'a> Object<'a> {
// note that the sections are 1-indexed because the zero section
// is special (apparently).
let mut symbols = Vec::new();
let mut i = 0;
let len = symtab.len();
while i < len {
let sym = symtab.symbol(i).ok()?;
i += 1 + sym.number_of_aux_symbols as usize;
let section_number = sym.section_number.get(LE);
if sym.derived_type() != object::pe::IMAGE_SYM_DTYPE_FUNCTION || section_number == 0 {
for (_, sym) in symtab.iter() {
if sym.derived_type() != object::pe::IMAGE_SYM_DTYPE_FUNCTION {
continue;
}
let Some(section_index) = sym.section() else {
continue;
};
let addr = usize::try_from(sym.value.get(LE)).ok()?;
let section = sections
.section(usize::try_from(section_number).ok()?)
.ok()?;
let section = sections.section(section_index).ok()?;
let va = usize::try_from(section.virtual_address.get(LE)).ok()?;
symbols.push((addr + va + image_base, sym));
}

View File

@ -281,20 +281,12 @@ impl<'a> Object<'a> {
}
}
fn object_mapping(path: &[u8]) -> Option<Mapping> {
fn object_mapping(file: &object::read::ObjectMapFile<'_>) -> Option<Mapping> {
use super::mystd::ffi::OsStr;
use super::mystd::os::unix::prelude::*;
let map;
// `N_OSO` symbol names can be either `/path/to/object.o` or `/path/to/archive.a(object.o)`.
let member_name = if let Some((archive_path, member_name)) = split_archive_path(path) {
map = super::mmap(Path::new(OsStr::from_bytes(archive_path)))?;
Some(member_name)
} else {
map = super::mmap(Path::new(OsStr::from_bytes(path)))?;
None
};
let map = super::mmap(Path::new(OsStr::from_bytes(file.path())))?;
let member_name = file.member();
Mapping::mk(map, |data, stash| {
let data = match member_name {
Some(member_name) => {
@ -314,16 +306,6 @@ fn object_mapping(path: &[u8]) -> Option<Mapping> {
})
}
fn split_archive_path(path: &[u8]) -> Option<(&[u8], &[u8])> {
let (last, path) = path.split_last()?;
if *last != b')' {
return None;
}
let index = path.iter().position(|&x| x == b'(')?;
let (archive, rest) = path.split_at(index);
Some((archive, &rest[1..]))
}
pub(super) fn handle_split_dwarf<'data>(
_package: Option<&gimli::DwarfPackage<EndianSlice<'data, Endian>>>,
_stash: &'data Stash,

View File

@ -292,32 +292,15 @@ cfg_if::cfg_if! {
OptionCppSymbol(None)
}
}
} else {
use core::marker::PhantomData;
// Make sure to keep this zero-sized, so that the `cpp_demangle` feature
// has no cost when disabled.
struct OptionCppSymbol<'a>(PhantomData<&'a ()>);
impl<'a> OptionCppSymbol<'a> {
fn parse(_: &'a [u8]) -> OptionCppSymbol<'a> {
OptionCppSymbol(PhantomData)
}
fn none() -> OptionCppSymbol<'a> {
OptionCppSymbol(PhantomData)
}
}
}
}
/// A wrapper around a symbol name to provide ergonomic accessors to the
/// demangled name, the raw bytes, the raw string, etc.
// Allow dead code for when the `cpp_demangle` feature is not enabled.
#[allow(dead_code)]
pub struct SymbolName<'a> {
bytes: &'a [u8],
demangled: Option<Demangle<'a>>,
#[cfg(feature = "cpp_demangle")]
cpp_demangled: OptionCppSymbol<'a>,
}
@ -327,6 +310,7 @@ impl<'a> SymbolName<'a> {
let str_bytes = str::from_utf8(bytes).ok();
let demangled = str_bytes.and_then(|s| try_demangle(s).ok());
#[cfg(feature = "cpp_demangle")]
let cpp = if demangled.is_none() {
OptionCppSymbol::parse(bytes)
} else {
@ -336,6 +320,7 @@ impl<'a> SymbolName<'a> {
SymbolName {
bytes: bytes,
demangled: demangled,
#[cfg(feature = "cpp_demangle")]
cpp_demangled: cpp,
}
}
@ -380,66 +365,46 @@ fn format_symbol_name(
Ok(())
}
cfg_if::cfg_if! {
if #[cfg(feature = "cpp_demangle")] {
impl<'a> fmt::Display for SymbolName<'a> {
impl<'a> fmt::Display for SymbolName<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(ref s) = self.demangled {
s.fmt(f)
} else if let Some(ref cpp) = self.cpp_demangled.0 {
cpp.fmt(f)
} else {
return s.fmt(f);
}
#[cfg(feature = "cpp_demangle")]
{
if let Some(ref cpp) = self.cpp_demangled.0 {
return cpp.fmt(f);
}
}
format_symbol_name(fmt::Display::fmt, self.bytes, f)
}
}
}
} else {
impl<'a> fmt::Display for SymbolName<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(ref s) = self.demangled {
s.fmt(f)
} else {
format_symbol_name(fmt::Display::fmt, self.bytes, f)
}
}
}
}
}
cfg_if::cfg_if! {
if #[cfg(all(feature = "std", feature = "cpp_demangle"))] {
impl<'a> fmt::Debug for SymbolName<'a> {
impl<'a> fmt::Debug for SymbolName<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use std::fmt::Write;
if let Some(ref s) = self.demangled {
return s.fmt(f)
return s.fmt(f);
}
#[cfg(all(feature = "std", feature = "cpp_demangle"))]
{
use std::fmt::Write;
// This may fail to print if the demangled symbol isn't actually
// valid, so handle the error here gracefully by not propagating
// it outwards.
if let Some(ref cpp) = self.cpp_demangled.0 {
let mut s = String::new();
if write!(s, "{cpp}").is_ok() {
return s.fmt(f)
return s.fmt(f);
}
}
}
format_symbol_name(fmt::Debug::fmt, self.bytes, f)
}
}
} else {
impl<'a> fmt::Debug for SymbolName<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(ref s) = self.demangled {
s.fmt(f)
} else {
format_symbol_name(fmt::Debug::fmt, self.bytes, f)
}
}
}
}
}
/// Attempt to reclaim that cached memory used to symbolicate addresses.
@ -453,7 +418,7 @@ cfg_if::cfg_if! {
/// While this function is always available it doesn't actually do anything on
/// most implementations. Libraries like dbghelp or libbacktrace do not provide
/// facilities to deallocate state and manage the allocated memory. For now the
/// `gimli-symbolize` feature of this crate is the only feature where this
/// `std` feature of this crate is the only feature where this
/// function has any effect.
#[cfg(feature = "std")]
pub fn clear_symbol_cache() {

View File

@ -38,6 +38,8 @@ cfg_if::cfg_if! {
pub use winapi::um::tlhelp32::*;
pub use winapi::um::winbase::*;
pub use winapi::um::winnt::*;
pub use winapi::um::winnls::*;
pub use winapi::um::stringapiset::*;
// Work around winapi not having this function on aarch64.
#[cfg(target_arch = "aarch64")]
@ -379,6 +381,7 @@ ffi! {
pub const INVALID_HANDLE_VALUE: HANDLE = -1isize as HANDLE;
pub const MAX_MODULE_NAME32: usize = 255;
pub const MAX_PATH: usize = 260;
pub const CP_UTF8: u32 = 65001;
pub type DWORD = u32;
pub type PDWORD = *mut u32;
@ -456,6 +459,16 @@ ffi! {
lpme: LPMODULEENTRY32W,
) -> BOOL;
pub fn lstrlenW(lpstring: PCWSTR) -> i32;
pub fn WideCharToMultiByte(
codepage: u32,
dwflags: u32,
lpwidecharstr: PCWSTR,
cchwidechar: i32,
lpmultibytestr: *mut i8,
cbmultibyte: i32,
lpdefaultchar: *const i8,
lpuseddefaultchar: *mut BOOL
) -> i32;
}
}

View File

@ -1,3 +1,4 @@
#![cfg(dbginfo = "collapsible")]
mod auxiliary;
macro_rules! pos {
@ -6,6 +7,7 @@ macro_rules! pos {
};
}
#[collapse_debuginfo(yes)]
macro_rules! check {
($($pos:expr),*) => ({
verify(&[$($pos,)* pos!()]);
@ -29,7 +31,7 @@ fn doit() {
dir.pop();
if cfg!(windows) {
dir.push("dylib_dep.dll");
} else if cfg!(target_os = "macos") {
} else if cfg!(target_vendor = "apple") {
dir.push("libdylib_dep.dylib");
} else if cfg!(target_os = "aix") {
dir.push("libdylib_dep.a");

View File

@ -230,18 +230,6 @@ fn many_threads() {
}
}
#[test]
#[cfg(feature = "rustc-serialize")]
fn is_rustc_serialize() {
extern crate rustc_serialize;
fn is_encode<T: rustc_serialize::Encodable>() {}
fn is_decode<T: rustc_serialize::Decodable>() {}
is_encode::<backtrace::Backtrace>();
is_decode::<backtrace::Backtrace>();
}
#[test]
#[cfg(feature = "serde")]
fn is_serde() {

View File

@ -1 +0,0 @@
{"files":{"Cargo.lock":"36372e08a5cf4b713529f222928466b4d9bdc4116896bb9f121022c3b66502f1","Cargo.toml":"a261c28ccafada2884a75cab0e4e67326bafe720d58c937dc235521105982165","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0dd882e53de11566d50f8e8e2d5a651bcf3fabee4987d70f306233cf39094ba7","README.md":"df01f5b4317d601e7de86743f9818aec9196abf9e298f5e47679b7a966ecd945","RELEASE-NOTES.md":"208162655e96cdf04949b6c0a53f24985095c8b188d7ea2c1017a23ce45e25eb","benches/benchmarks.rs":"da4a49294a7fcaf718f2b062a52ed669ca096abce6c57b4025efdd24825048c2","clippy.toml":"b26be4d15ed059985ce6994f11817fd7562046f46e460a0dc64dbb71cfc246d1","examples/base64.rs":"b75ead2199a9b4389c69fe6f1ae988176a263b8fc84e7a4fea1d7e5a41592078","icon_CLion.svg":"cffa044ba75cb998ee3306991dc4a3755ec2f39ab95ddd4b74bc21988389020f","src/alphabet.rs":"3461a34bd63c10cfe232deb5dd42e2ec2dfb5decd508caf31ec2a1826ad68131","src/chunked_encoder.rs":"edfdbb9a4329b80fb2c769ada81e234e00839e0fa85faaa70bacf40ce12e951c","src/decode.rs":"666ca75ccd975f0548d37312d2843ca4703b83697a044839bbefeba8f4f7874a","src/display.rs":"31bf3e19274a0b80dd8948a81ea535944f756ef5b88736124c940f5fe1e8c71c","src/encode.rs":"44ddcc162f3fe9817b6e857dda0a3b9197b90a657e5f71c44aacabf5431ccf7d","src/engine/general_purpose/decode.rs":"ba8a76d333ab96dd07b3f84bd6d405d690d2d17e84bd0878f05245a82dc16853","src/engine/general_purpose/decode_suffix.rs":"71ceb066b73e8cc833916e2cedbf0a01b07c2f16e30b2b2f63aff1c823874b51","src/engine/general_purpose/mod.rs":"9f49375fc03166a491acf464daa7a9e6540fdc2cca407da9a248e15640952c20","src/engine/mod.rs":"15210115e5f99e0d252a1240922deb1516778e318564f92a9d880a82fd82a55e","src/engine/naive.rs":"dc166010633e8de0fbff31e2f05d128506f3e0f34a6358c1a825b59a8ea1af0d","src/engine/tests.rs":"37bee2de07343bf5d37720f29cda291e8562f2363704e0ad91862d5991568d22","src/lib.rs":"3f964521aea13dbe49438ffed630a55471401da4c55c40506969697b649237f0","src/prelude.rs":"c1587138e5301ac797c5c362cb3638649b33f79c20c16db6f38ad44330540752","src/read/decoder.rs":"cc87daa4c52a23d1275352bccf07468baf2b60e90b2ac14f89a94254697cb83c","src/read/decoder_tests.rs":"edeee377e70095532be1625d0148de2273b739e9069a05e616d3e67877d92f1d","src/read/mod.rs":"e0b714eda02d16b1ffa6f78fd09b2f963e01c881b1f7c17b39db4e904be5e746","src/tests.rs":"90cb9f8a1ccb7c4ddc4f8618208e0031fc97e0df0e5aa466d6a5cf45d25967d8","src/write/encoder.rs":"c889c853249220fe2ddaeb77ee6e2ee2945f7db88cd6658ef89ff71b81255ea8","src/write/encoder_string_writer.rs":"0326c9d120369b9bbc35697b5b9b141bed24283374c93d5af1052eb042e47799","src/write/encoder_tests.rs":"28695a485b17cf5db73656aae5d90127f726e02c6d70efd83e5ab53a4cc17b38","src/write/mod.rs":"73cd98dadc9d712b3fefd9449d97e825e097397441b90588e0051e4d3b0911b9","tests/encode.rs":"5309f4538b1df611436f7bfba7409c725161b6f841b1bbf8d9890ae185de7d88","tests/tests.rs":"78efcf0dc4bb6ae52f7a91fcad89e44e4dce578224c36b4e6c1c306459be8500"},"package":"9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"}

1492
pve-rs/vendor/base64-0.21.7/Cargo.lock generated vendored

File diff suppressed because it is too large Load Diff

View File

@ -1,88 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.48.0"
name = "base64"
version = "0.21.7"
authors = [
"Alice Maz <alice@alicemaz.com>",
"Marshall Pierce <marshall@mpierce.org>",
]
description = "encodes and decodes base64 as bytes or utf8"
documentation = "https://docs.rs/base64"
readme = "README.md"
keywords = [
"base64",
"utf8",
"encode",
"decode",
"no_std",
]
categories = ["encoding"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/marshallpierce/rust-base64"
[package.metadata.docs.rs]
rustdoc-args = ["--generate-link-to-definition"]
[profile.bench]
debug = 2
[profile.test]
opt-level = 3
[[example]]
name = "base64"
required-features = ["std"]
[[test]]
name = "tests"
required-features = ["alloc"]
[[test]]
name = "encode"
required-features = ["alloc"]
[[bench]]
name = "benchmarks"
harness = false
required-features = ["std"]
[dev-dependencies.clap]
version = "3.2.25"
features = ["derive"]
[dev-dependencies.criterion]
version = "0.4.0"
[dev-dependencies.once_cell]
version = "1"
[dev-dependencies.rand]
version = "0.8.5"
features = ["small_rng"]
[dev-dependencies.rstest]
version = "0.13.0"
[dev-dependencies.rstest_reuse]
version = "0.6.0"
[dev-dependencies.strum]
version = "0.25"
features = ["derive"]
[features]
alloc = []
default = ["std"]
std = ["alloc"]

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Alice Maz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,154 +0,0 @@
# [base64](https://crates.io/crates/base64)
[![](https://img.shields.io/crates/v/base64.svg)](https://crates.io/crates/base64) [![Docs](https://docs.rs/base64/badge.svg)](https://docs.rs/base64) [![CircleCI](https://circleci.com/gh/marshallpierce/rust-base64/tree/master.svg?style=shield)](https://circleci.com/gh/marshallpierce/rust-base64/tree/master) [![codecov](https://codecov.io/gh/marshallpierce/rust-base64/branch/master/graph/badge.svg)](https://codecov.io/gh/marshallpierce/rust-base64) [![unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/)
<a href="https://www.jetbrains.com/?from=rust-base64"><img src="/icon_CLion.svg" height="40px"/></a>
Made with CLion. Thanks to JetBrains for supporting open source!
It's base64. What more could anyone want?
This library's goals are to be *correct* and *fast*. It's thoroughly tested and widely used. It exposes functionality at
multiple levels of abstraction so you can choose the level of convenience vs performance that you want,
e.g. `decode_engine_slice` decodes into an existing `&mut [u8]` and is pretty fast (2.6GiB/s for a 3 KiB input),
whereas `decode_engine` allocates a new `Vec<u8>` and returns it, which might be more convenient in some cases, but is
slower (although still fast enough for almost any purpose) at 2.1 GiB/s.
See the [docs](https://docs.rs/base64) for all the details.
## FAQ
### I need to decode base64 with whitespace/null bytes/other random things interspersed in it. What should I do?
Remove non-base64 characters from your input before decoding.
If you have a `Vec` of base64, [retain](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.retain) can be used to
strip out whatever you need removed.
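For example, a small sketch of the `retain` approach with the 0.21 `Engine` API (the whitespace-laced input is made up for illustration):
```rust
use base64::{engine::general_purpose::STANDARD, Engine as _};

fn main() {
    // Hypothetical input with whitespace sprinkled through it.
    let mut raw = b"aGVs bG8g\nd29y\tbGQ=".to_vec();
    // Keep only bytes that can be part of the encoding before decoding.
    raw.retain(|b| !b" \n\t\r\x0b\x0c".contains(b));
    let decoded = STANDARD.decode(&raw).expect("valid base64 after filtering");
    assert_eq!(decoded, b"hello world");
}
```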
If you have a `Read` (e.g. reading a file or network socket), there are various approaches.
- Use [iter_read](https://crates.io/crates/iter-read) together with `Read`'s `bytes()` to filter out unwanted bytes.
- Implement `Read` with a `read()` impl that delegates to your actual `Read`, and then drops any bytes you don't want.
### I need to line-wrap base64, e.g. for MIME/PEM.
[line-wrap](https://crates.io/crates/line-wrap) does just that.
### I want canonical base64 encoding/decoding.
First, don't do this. You should no more expect Base64 to be canonical than you should expect compression algorithms to
produce canonical output across all usage in the wild (hint: they don't).
However, [people are drawn to their own destruction like moths to a flame](https://eprint.iacr.org/2022/361), so here we
are.
There are two opportunities for non-canonical encoding (and thus, detection of the same during decoding): the final bits
of the last encoded token in two or three token suffixes, and the `=` token used to inflate the suffix to a full four
tokens.
The trailing bits issue is unavoidable: with 6 bits available in each encoded token, 1 input byte takes 2 tokens,
with the second one having some bits unused. Same for two input bytes: 16 bits, but 3 tokens have 18 bits. Unless we
decide to stop shipping whole bytes around, we're stuck with those extra bits that a sneaky or buggy encoder might set
to 1 instead of 0.
The `=` pad bytes, on the other hand, are entirely a self-own by the Base64 standard. They do not affect decoding other
than to provide an opportunity to say "that padding is incorrect". Exabytes of storage and transfer have no doubt been
wasted on pointless `=` bytes. Somehow we all seem to be quite comfortable with, say, hex-encoded data just stopping
when it's done rather than requiring a confirmation that the author of the encoder could count to four. Anyway, there
are two ways to make pad bytes predictable: require canonical padding to the next multiple of four bytes as per the RFC,
or, if you control all producers and consumers, save a few bytes by requiring no padding (especially applicable to the
url-safe alphabet).
All `Engine` implementations must at a minimum support treating non-canonical padding of both types as an error, and
optionally may allow other behaviors.
## Rust version compatibility
The minimum supported Rust version is 1.48.0.
# Contributing
Contributions are very welcome. However, because this library is used widely, and in security-sensitive contexts, all
PRs will be carefully scrutinized. Beyond that, this sort of low level library simply needs to be 100% correct. Nobody
wants to chase bugs in encoding of any sort.
All this means that it takes me a fair amount of time to review each PR, so it might take quite a while to carve out the
free time to give each PR the attention it deserves. I will get to everyone eventually!
## Developing
Benchmarks are in `benches/`.
```bash
cargo bench
```
## no_std
This crate supports no_std. By default the crate targets std via the `std` feature. You can deactivate
the `default-features` to target `core` instead. In that case you lose out on all the functionality revolving
around `std::io`, `std::error::Error`, and heap allocations. There is an additional `alloc` feature that you can activate
to bring back the support for heap allocations.
## Profiling
On Linux, you can use [perf](https://perf.wiki.kernel.org/index.php/Main_Page) for profiling. Then compile the
benchmarks with `cargo bench --no-run`.
Run the benchmark binary with `perf` (shown here filtering to one particular benchmark, which will make the results
easier to read). `perf` is only available to the root user on most systems as it fiddles with event counters in your
CPU, so use `sudo`. We need to run the actual benchmark binary, hence the path into `target`. You can see the actual
full path with `cargo bench -v`; it will print out the commands it runs. If you use the exact path
that `bench` outputs, make sure you get the one that's for the benchmarks, not the tests. You may also want
to `cargo clean` so you have only one `benchmarks-` binary (they tend to accumulate).
```bash
sudo perf record target/release/deps/benchmarks-* --bench decode_10mib_reuse
```
Then analyze the results, again with perf:
```bash
sudo perf annotate -l
```
You'll see a bunch of interleaved rust source and assembly like this. The section with `lib.rs:327` is telling us that
4.02% of samples saw the `movzbl` aka bit shift as the active instruction. However, this percentage is not as exact as
it seems due to a phenomenon called *skid*. Basically, a consequence of how fancy modern CPUs are is that this sort of
instruction profiling is inherently inaccurate, especially in branch-heavy code.
```text
lib.rs:322 0.70 : 10698: mov %rdi,%rax
2.82 : 1069b: shr $0x38,%rax
: if morsel == decode_tables::INVALID_VALUE {
: bad_byte_index = input_index;
: break;
: };
: accum = (morsel as u64) << 58;
lib.rs:327 4.02 : 1069f: movzbl (%r9,%rax,1),%r15d
: // fast loop of 8 bytes at a time
: while input_index < length_of_full_chunks {
: let mut accum: u64;
:
: let input_chunk = BigEndian::read_u64(&input_bytes[input_index..(input_index + 8)]);
: morsel = decode_table[(input_chunk >> 56) as usize];
lib.rs:322 3.68 : 106a4: cmp $0xff,%r15
: if morsel == decode_tables::INVALID_VALUE {
0.00 : 106ab: je 1090e <base64::decode_config_buf::hbf68a45fefa299c1+0x46e>
```
## Fuzzing
This uses [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz). See `fuzz/fuzzers` for the available fuzzing scripts.
To run, use an invocation like these:
```bash
cargo +nightly fuzz run roundtrip
cargo +nightly fuzz run roundtrip_no_pad
cargo +nightly fuzz run roundtrip_random_config -- -max_len=10240
cargo +nightly fuzz run decode_random
```
## License
This project is dual-licensed under MIT and Apache 2.0.

View File

@ -1,261 +0,0 @@
# 0.21.7
- Support getting an alphabet's contents as a str via `Alphabet::as_str()`
# 0.21.6
- Improved introductory documentation and example
# 0.21.5
- Add `Debug` and `Clone` impls for the general purpose Engine
# 0.21.4
- Make `encoded_len` `const`, allowing the creation of arrays sized to encode compile-time-known data lengths
# 0.21.3
- Implement `source` instead of `cause` on Error types
- Roll back MSRV to 1.48.0 so Debian can continue to live in a time warp
- Slightly faster chunked encoding for short inputs
- Decrease binary size
# 0.21.2
- Rollback MSRV to 1.57.0 -- only dev dependencies need 1.60, not the main code
# 0.21.1
- Remove the possibility of panicking during decoded length calculations
- `DecoderReader` no longer sometimes erroneously ignores
padding [#226](https://github.com/marshallpierce/rust-base64/issues/226)
## Breaking changes
- `Engine.internal_decode` return type changed
- Update MSRV to 1.60.0
# 0.21.0
## Migration
### Functions
| < 0.20 function | 0.21 equivalent |
|-------------------------|-------------------------------------------------------------------------------------|
| `encode()` | `engine::general_purpose::STANDARD.encode()` or `prelude::BASE64_STANDARD.encode()` |
| `encode_config()` | `engine.encode()` |
| `encode_config_buf()` | `engine.encode_string()` |
| `encode_config_slice()` | `engine.encode_slice()` |
| `decode()` | `engine::general_purpose::STANDARD.decode()` or `prelude::BASE64_STANDARD.decode()` |
| `decode_config()` | `engine.decode()` |
| `decode_config_buf()` | `engine.decode_vec()` |
| `decode_config_slice()` | `engine.decode_slice()` |
The short-lived 0.20 functions were the 0.13 functions with `config` replaced with `engine`.
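As a hedged illustration of the function mapping above (the data value is made up):
```rust
use base64::{engine::general_purpose::STANDARD, Engine as _};

fn main() {
    let data = b"example bytes";
    // < 0.20: base64::encode(data) and base64::decode(&encoded)
    let encoded = STANDARD.encode(data);
    let decoded = STANDARD.decode(&encoded).unwrap();
    assert_eq!(decoded, data);
}
```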
### Padding
If applicable, use the preset engines `engine::STANDARD`, `engine::STANDARD_NO_PAD`, `engine::URL_SAFE`,
or `engine::URL_SAFE_NO_PAD`.
The `NO_PAD` ones require that padding is absent when decoding, and the others require that
canonical padding is present.
If you need the < 0.20 behavior that did not care about padding, or want to recreate < 0.20.0's predefined `Config`s
precisely, see the following table.
| 0.13.1 Config | 0.20.0+ alphabet | `encode_padding` | `decode_padding_mode` |
|-----------------|------------------|------------------|-----------------------|
| STANDARD | STANDARD | true | Indifferent |
| STANDARD_NO_PAD | STANDARD | false | Indifferent |
| URL_SAFE | URL_SAFE | true | Indifferent |
| URL_SAFE_NO_PAD | URL_SAFE | false | Indifferent |
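A sketch of the STANDARD row above using the 0.21 builder API, assuming the `GeneralPurposeConfig` methods shown here (the const name is made up):
```rust
use base64::{
    alphabet,
    engine::{general_purpose::{GeneralPurpose, GeneralPurposeConfig}, DecodePaddingMode},
    Engine as _,
};

// Pads on encode, accepts padded or unpadded input on decode ("Indifferent").
const LENIENT_STANDARD: GeneralPurpose = GeneralPurpose::new(
    &alphabet::STANDARD,
    GeneralPurposeConfig::new()
        .with_encode_padding(true)
        .with_decode_padding_mode(DecodePaddingMode::Indifferent),
);

fn main() {
    // Unpadded input would be rejected by engine::general_purpose::STANDARD,
    // but decodes fine under the indifferent mode.
    assert_eq!(LENIENT_STANDARD.decode("aGk").unwrap(), b"hi");
}
```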
# 0.21.0-rc.1
- Restore the ability to decode into a slice of precisely the correct length with `Engine.decode_slice_unchecked`.
- Add `Engine` as a `pub use` in `prelude`.
# 0.21.0-beta.2
## Breaking changes
- Re-exports of preconfigured engines in `engine` are removed in favor of `base64::prelude::...` that are better suited
to those who wish to `use` the entire path to a name.
# 0.21.0-beta.1
## Breaking changes
- `FastPortable` was only meant to be an interim name, and shouldn't have shipped in 0.20. It is now `GeneralPurpose` to
make its intended usage more clear.
- `GeneralPurpose` and its config are now `pub use`'d in the `engine` module for convenience.
- Change a few `from()` functions to be `new()`. `from()` causes confusing compiler errors because of confusion
with `From::from`, and is a little misleading because some of those invocations are not very cheap as one would
usually expect from a `from` call.
- `encode*` and `decode*` top level functions are now methods on `Engine`.
- `DEFAULT_ENGINE` was replaced by `engine::general_purpose::STANDARD`
- Predefined engine consts `engine::general_purpose::{STANDARD, STANDARD_NO_PAD, URL_SAFE, URL_SAFE_NO_PAD}`
- These are `pub use`d into `engine` as well
- The `*_slice` decode/encode functions now return an error instead of panicking when the output slice is too small
- As part of this, there isn't now a public way to decode into a slice _exactly_ the size needed for inputs that
aren't multiples of 4 tokens. If adding up to 2 bytes to always be a multiple of 3 bytes for the decode buffer is
a problem, file an issue.
## Other changes
- `decoded_len_estimate()` is provided to make it easy to size decode buffers correctly.
# 0.20.0
## Breaking changes
- Update MSRV to 1.57.0
- Decoding can now either ignore padding, require correct padding, or require no padding. The default is to require
correct padding.
- The `NO_PAD` config now requires that padding be absent when decoding.
## 0.20.0-alpha.1
### Breaking changes
- Extended the `Config` concept into the `Engine` abstraction, allowing the user to pick different encoding / decoding
implementations.
- What was formerly the only algorithm is now the `FastPortable` engine, so named because it's portable (works on
any CPU) and relatively fast.
- This opens the door to a portable constant-time
implementation ([#153](https://github.com/marshallpierce/rust-base64/pull/153),
presumably `ConstantTimePortable`?) for security-sensitive applications that need side-channel resistance, and
CPU-specific SIMD implementations for more speed.
- Standard base64 per the RFC is available via `DEFAULT_ENGINE`. To use different alphabets or other settings (
padding, etc), create your own engine instance.
- `CharacterSet` is now `Alphabet` (per the RFC), and allows creating custom alphabets. The corresponding tables that
were previously code-generated are now built dynamically.
- Since there are already multiple breaking changes, various functions are renamed to be more consistent and
discoverable.
- MSRV is now 1.47.0 to allow various things to use `const fn`.
- `DecoderReader` now owns its inner reader, and can expose it via `into_inner()`. For symmetry, `EncoderWriter` can do
the same with its writer.
- `encoded_len` is now public so you can size encode buffers precisely.
# 0.13.1
- More precise decode buffer sizing, avoiding unnecessary allocation in `decode_config`.
# 0.13.0
- Config methods are const
- Added `EncoderStringWriter` to allow encoding directly to a String
- `EncoderWriter` now owns its delegate writer rather than keeping a reference to it (though refs still work)
- As a consequence, it is now possible to extract the delegate writer from an `EncoderWriter` via `finish()`, which
returns `Result<W>` instead of `Result<()>`. If you were calling `finish()` explicitly, you will now need to
use `let _ = foo.finish()` instead of just `foo.finish()` to avoid a warning about the unused value.
- When decoding input that has both an invalid length and an invalid symbol as the last byte, `InvalidByte` will be
emitted instead of `InvalidLength` to make the problem more obvious.
# 0.12.2
- Add `BinHex` alphabet
# 0.12.1
- Add `Bcrypt` alphabet
# 0.12.0
- A `Read` implementation (`DecoderReader`) to let users transparently decode data from a b64 input source
- IMAP's modified b64 alphabet
- Relaxed type restrictions to just `AsRef<[u8]>` for main `encode*`/`decode*` functions
- A minor performance improvement in encoding
# 0.11.0
- Minimum rust version 1.34.0
- `no_std` is now supported via the two new features `alloc` and `std`.
# 0.10.1
- Minimum rust version 1.27.2
- Fix bug in streaming encoding ([#90](https://github.com/marshallpierce/rust-base64/pull/90)): if the underlying writer
didn't write all the bytes given to it, the remaining bytes would not be retried later. See the docs
on `EncoderWriter::write`.
- Make it configurable whether or not to return an error when decoding detects excess trailing bits.
# 0.10.0
- Remove line wrapping. Line wrapping was never a great conceptual fit in this library, and other features (streaming
encoding, etc) either couldn't support it or could support only special cases of it with a great increase in
complexity. Line wrapping has been pulled out into a [line-wrap](https://crates.io/crates/line-wrap) crate, so it's
still available if you need it.
- `Base64Display` creation no longer uses a `Result` because it can't fail, which means its helper methods for common configs that `unwrap()` for you are no longer needed
- Add a streaming encoder `Write` impl to transparently base64 as you write.
- Remove the remaining `unsafe` code.
- Remove whitespace stripping to simplify `no_std` support. No out of the box configs use it, and it's trivial to do
yourself if needed: `filter(|b| !b" \n\t\r\x0b\x0c".contains(b))`.
- Detect invalid trailing symbols when decoding and return an error rather than silently ignoring them.
# 0.9.3
- Update safemem
# 0.9.2
- Derive `Clone` for `DecodeError`.
# 0.9.1
- Add support for `crypt(3)`'s base64 variant.
# 0.9.0
- `decode_config_slice` function for no-allocation decoding, analogous to `encode_config_slice`
- Decode performance optimization
# 0.8.0
- `encode_config_slice` function for no-allocation encoding
# 0.7.0
- `STANDARD_NO_PAD` config
- `Base64Display` heap-free wrapper for use in format strings, etc
# 0.6.0
- Decode performance improvements
- Use `unsafe` in fewer places
- Added fuzzers
# 0.5.2
- Avoid usize overflow when calculating length
- Better line wrapping performance
# 0.5.1
- Temporarily disable line wrapping
- Add Apache 2.0 license
# 0.5.0
- MIME support, including configurable line endings and line wrapping
- Removed `decode_ws`
- Renamed `Base64Error` to `DecodeError`
# 0.4.1
- Allow decoding a `AsRef<[u8]>` instead of just a `&str`
# 0.4.0
- Configurable padding
- Encode performance improvements
# 0.3.0
- Added encode/decode functions that do not allocate their own storage
- Decode performance improvements
- Extraneous padding bytes are no longer ignored. Now, an error will be returned.

View File

@ -1,239 +0,0 @@
#[macro_use]
extern crate criterion;
use base64::{
display,
engine::{general_purpose::STANDARD, Engine},
write,
};
use criterion::{black_box, Bencher, BenchmarkId, Criterion, Throughput};
use rand::{Rng, SeedableRng};
use std::io::{self, Read, Write};
fn do_decode_bench(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
fill(&mut v);
let encoded = STANDARD.encode(&v);
b.iter(|| {
let orig = STANDARD.decode(&encoded);
black_box(&orig);
});
}
fn do_decode_bench_reuse_buf(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
fill(&mut v);
let encoded = STANDARD.encode(&v);
let mut buf = Vec::new();
b.iter(|| {
STANDARD.decode_vec(&encoded, &mut buf).unwrap();
black_box(&buf);
buf.clear();
});
}
fn do_decode_bench_slice(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
fill(&mut v);
let encoded = STANDARD.encode(&v);
let mut buf = vec![0; size];
b.iter(|| {
STANDARD.decode_slice(&encoded, &mut buf).unwrap();
black_box(&buf);
});
}
fn do_decode_bench_stream(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
fill(&mut v);
let encoded = STANDARD.encode(&v);
let mut buf = vec![0; size];
buf.truncate(0);
b.iter(|| {
let mut cursor = io::Cursor::new(&encoded[..]);
let mut decoder = base64::read::DecoderReader::new(&mut cursor, &STANDARD);
decoder.read_to_end(&mut buf).unwrap();
buf.clear();
black_box(&buf);
});
}
fn do_encode_bench(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size);
fill(&mut v);
b.iter(|| {
let e = STANDARD.encode(&v);
black_box(&e);
});
}
fn do_encode_bench_display(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size);
fill(&mut v);
b.iter(|| {
let e = format!("{}", display::Base64Display::new(&v, &STANDARD));
black_box(&e);
});
}
fn do_encode_bench_reuse_buf(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size);
fill(&mut v);
let mut buf = String::new();
b.iter(|| {
STANDARD.encode_string(&v, &mut buf);
buf.clear();
});
}
fn do_encode_bench_slice(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size);
fill(&mut v);
// conservative estimate of encoded size
let mut buf = vec![0; v.len() * 2];
b.iter(|| STANDARD.encode_slice(&v, &mut buf).unwrap());
}
fn do_encode_bench_stream(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size);
fill(&mut v);
let mut buf = Vec::new();
buf.reserve(size * 2);
b.iter(|| {
buf.clear();
let mut stream_enc = write::EncoderWriter::new(&mut buf, &STANDARD);
stream_enc.write_all(&v).unwrap();
stream_enc.flush().unwrap();
});
}
fn do_encode_bench_string_stream(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size);
fill(&mut v);
b.iter(|| {
let mut stream_enc = write::EncoderStringWriter::new(&STANDARD);
stream_enc.write_all(&v).unwrap();
stream_enc.flush().unwrap();
let _ = stream_enc.into_inner();
});
}
fn do_encode_bench_string_reuse_buf_stream(b: &mut Bencher, &size: &usize) {
let mut v: Vec<u8> = Vec::with_capacity(size);
fill(&mut v);
let mut buf = String::new();
b.iter(|| {
buf.clear();
let mut stream_enc = write::EncoderStringWriter::from_consumer(&mut buf, &STANDARD);
stream_enc.write_all(&v).unwrap();
stream_enc.flush().unwrap();
let _ = stream_enc.into_inner();
});
}
fn fill(v: &mut Vec<u8>) {
let cap = v.capacity();
// weak randomness is plenty; we just want to not be completely friendly to the branch predictor
let mut r = rand::rngs::SmallRng::from_entropy();
while v.len() < cap {
v.push(r.gen::<u8>());
}
}
const BYTE_SIZES: [usize; 5] = [3, 50, 100, 500, 3 * 1024];
// Benchmarks over these byte sizes take longer so we will run fewer samples to
// keep the benchmark runtime reasonable.
const LARGE_BYTE_SIZES: [usize; 3] = [3 * 1024 * 1024, 10 * 1024 * 1024, 30 * 1024 * 1024];
fn encode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) {
let mut group = c.benchmark_group(label);
group
.warm_up_time(std::time::Duration::from_millis(500))
.measurement_time(std::time::Duration::from_secs(3));
for size in byte_sizes {
group
.throughput(Throughput::Bytes(*size as u64))
.bench_with_input(BenchmarkId::new("encode", size), size, do_encode_bench)
.bench_with_input(
BenchmarkId::new("encode_display", size),
size,
do_encode_bench_display,
)
.bench_with_input(
BenchmarkId::new("encode_reuse_buf", size),
size,
do_encode_bench_reuse_buf,
)
.bench_with_input(
BenchmarkId::new("encode_slice", size),
size,
do_encode_bench_slice,
)
.bench_with_input(
BenchmarkId::new("encode_reuse_buf_stream", size),
size,
do_encode_bench_stream,
)
.bench_with_input(
BenchmarkId::new("encode_string_stream", size),
size,
do_encode_bench_string_stream,
)
.bench_with_input(
BenchmarkId::new("encode_string_reuse_buf_stream", size),
size,
do_encode_bench_string_reuse_buf_stream,
);
}
group.finish();
}
fn decode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) {
let mut group = c.benchmark_group(label);
for size in byte_sizes {
group
.warm_up_time(std::time::Duration::from_millis(500))
.measurement_time(std::time::Duration::from_secs(3))
.throughput(Throughput::Bytes(*size as u64))
.bench_with_input(BenchmarkId::new("decode", size), size, do_decode_bench)
.bench_with_input(
BenchmarkId::new("decode_reuse_buf", size),
size,
do_decode_bench_reuse_buf,
)
.bench_with_input(
BenchmarkId::new("decode_slice", size),
size,
do_decode_bench_slice,
)
.bench_with_input(
BenchmarkId::new("decode_stream", size),
size,
do_decode_bench_stream,
);
}
group.finish();
}
fn bench(c: &mut Criterion) {
encode_benchmarks(c, "encode_small_input", &BYTE_SIZES[..]);
encode_benchmarks(c, "encode_large_input", &LARGE_BYTE_SIZES[..]);
decode_benchmarks(c, "decode_small_input", &BYTE_SIZES[..]);
decode_benchmarks(c, "decode_large_input", &LARGE_BYTE_SIZES[..]);
}
criterion_group!(benches, bench);
criterion_main!(benches);

View File

@ -1 +0,0 @@
msrv = "1.48.0"

View File

@ -1,81 +0,0 @@
use std::fs::File;
use std::io::{self, Read};
use std::path::PathBuf;
use std::process;
use base64::{alphabet, engine, read, write};
use clap::Parser;
#[derive(Clone, Debug, Parser, strum::EnumString, Default)]
#[strum(serialize_all = "kebab-case")]
enum Alphabet {
#[default]
Standard,
UrlSafe,
}
/// Base64 encode or decode FILE (or standard input), to standard output.
#[derive(Debug, Parser)]
struct Opt {
/// Decode the base64-encoded input (default: encode the input as base64).
#[structopt(short = 'd', long = "decode")]
decode: bool,
/// The encoding alphabet: "standard" (default) or "url-safe".
#[structopt(long = "alphabet")]
alphabet: Option<Alphabet>,
/// Omit padding characters while encoding, and reject them while decoding.
#[structopt(short = 'p', long = "no-padding")]
no_padding: bool,
/// The file to encode or decode.
#[structopt(name = "FILE", parse(from_os_str))]
file: Option<PathBuf>,
}
fn main() {
let opt = Opt::parse();
let stdin;
let mut input: Box<dyn Read> = match opt.file {
None => {
stdin = io::stdin();
Box::new(stdin.lock())
}
Some(ref f) if f.as_os_str() == "-" => {
stdin = io::stdin();
Box::new(stdin.lock())
}
Some(f) => Box::new(File::open(f).unwrap()),
};
let alphabet = opt.alphabet.unwrap_or_default();
let engine = engine::GeneralPurpose::new(
&match alphabet {
Alphabet::Standard => alphabet::STANDARD,
Alphabet::UrlSafe => alphabet::URL_SAFE,
},
match opt.no_padding {
true => engine::general_purpose::NO_PAD,
false => engine::general_purpose::PAD,
},
);
let stdout = io::stdout();
let mut stdout = stdout.lock();
let r = if opt.decode {
let mut decoder = read::DecoderReader::new(&mut input, &engine);
io::copy(&mut decoder, &mut stdout)
} else {
let mut encoder = write::EncoderWriter::new(&mut stdout, &engine);
io::copy(&mut input, &mut encoder)
};
if let Err(e) = r {
eprintln!(
"Base64 {} failed with {}",
if opt.decode { "decode" } else { "encode" },
e
);
process::exit(1);
}
}

View File

@ -1,34 +0,0 @@
<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 128 128">
<defs>
<linearGradient id="linear-gradient" x1="40.69" y1="-676.56" x2="83.48" y2="-676.56" gradientTransform="matrix(1, 0, 0, -1, 0, -648.86)" gradientUnits="userSpaceOnUse">
<stop offset="0" stop-color="#ed358c"/>
<stop offset="0.16" stop-color="#e9388c"/>
<stop offset="0.3" stop-color="#de418c"/>
<stop offset="0.43" stop-color="#cc508c"/>
<stop offset="0.57" stop-color="#b2658d"/>
<stop offset="0.7" stop-color="#90808d"/>
<stop offset="0.83" stop-color="#67a18e"/>
<stop offset="0.95" stop-color="#37c78f"/>
<stop offset="1" stop-color="#22d88f"/>
</linearGradient>
<linearGradient id="linear-gradient-2" x1="32.58" y1="-665.27" x2="13.76" y2="-791.59" gradientTransform="matrix(1, 0, 0, -1, 0, -648.86)" gradientUnits="userSpaceOnUse">
<stop offset="0.09" stop-color="#22d88f"/>
<stop offset="0.9" stop-color="#029de0"/>
</linearGradient>
<linearGradient id="linear-gradient-3" x1="116.68" y1="-660.66" x2="-12.09" y2="-796.66" xlink:href="#linear-gradient-2"/>
<linearGradient id="linear-gradient-4" x1="73.35" y1="-739.1" x2="122.29" y2="-746.06" xlink:href="#linear-gradient-2"/>
</defs>
<title>icon_CLion</title>
<g>
<polygon points="49.2 51.8 40.6 55.4 48.4 0 77.8 16.2 49.2 51.8" fill="url(#linear-gradient)"/>
<polygon points="44.6 76.8 48.8 0 11.8 23.2 0 94 44.6 76.8" fill="url(#linear-gradient-2)"/>
<polygon points="125.4 38.4 109 4.8 77.8 16.2 55 41.4 0 94 41.6 124.4 93.6 77.2 125.4 38.4" fill="url(#linear-gradient-3)"/>
<polygon points="53.8 54.6 46.6 98.4 75.8 121 107.8 128 128 82.4 53.8 54.6" fill="url(#linear-gradient-4)"/>
</g>
<g>
<rect x="24" y="24" width="80" height="80"/>
<rect x="31.6" y="89" width="30" height="5" fill="#fff"/>
<path d="M31,51.2h0A16.83,16.83,0,0,1,48.2,34c6.2,0,10,2,13,5.2l-4.6,5.4c-2.6-2.4-5.2-3.8-8.4-3.8-5.6,0-9.6,4.6-9.6,10.4h0c0,5.6,4,10.4,9.6,10.4,3.8,0,6.2-1.6,8.8-3.8l4.6,4.6c-3.4,3.6-7.2,6-13.6,6A17,17,0,0,1,31,51.2" fill="#fff"/>
<path d="M66.6,34.4H74v27H88.4v6.2H66.6V34.4Z" fill="#fff"/>
</g>
</svg>


View File

@ -1,285 +0,0 @@
//! Provides [Alphabet] and constants for alphabets commonly used in the wild.
use crate::PAD_BYTE;
use core::{convert, fmt};
#[cfg(any(feature = "std", test))]
use std::error;
const ALPHABET_SIZE: usize = 64;
/// An alphabet defines the 64 ASCII characters (symbols) used for base64.
///
/// Common alphabets are provided as constants, and custom alphabets
/// can be made via `from_str` or the `TryFrom<str>` implementation.
///
/// # Examples
///
/// Building and using a custom Alphabet:
///
/// ```
/// let custom = base64::alphabet::Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap();
///
/// let engine = base64::engine::GeneralPurpose::new(
/// &custom,
/// base64::engine::general_purpose::PAD);
/// ```
///
/// Building a const:
///
/// ```
/// use base64::alphabet::Alphabet;
///
/// static CUSTOM: Alphabet = {
/// // Result::unwrap() isn't const yet, but panic!() is OK
/// match Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") {
/// Ok(x) => x,
/// Err(_) => panic!("creation of alphabet failed"),
/// }
/// };
/// ```
///
/// Building lazily:
///
/// ```
/// use base64::{
/// alphabet::Alphabet,
/// engine::{general_purpose::GeneralPurpose, GeneralPurposeConfig},
/// };
/// use once_cell::sync::Lazy;
///
/// static CUSTOM: Lazy<Alphabet> = Lazy::new(||
/// Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap()
/// );
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Alphabet {
pub(crate) symbols: [u8; ALPHABET_SIZE],
}
impl Alphabet {
/// Performs no checks so that it can be const.
/// Used only for known-valid strings.
const fn from_str_unchecked(alphabet: &str) -> Self {
let mut symbols = [0_u8; ALPHABET_SIZE];
let source_bytes = alphabet.as_bytes();
// a way to copy that's allowed in const fn
let mut index = 0;
while index < ALPHABET_SIZE {
symbols[index] = source_bytes[index];
index += 1;
}
Self { symbols }
}
/// Create an `Alphabet` from a string of 64 unique printable ASCII bytes.
///
/// The `=` byte is not allowed as it is used for padding.
pub const fn new(alphabet: &str) -> Result<Self, ParseAlphabetError> {
let bytes = alphabet.as_bytes();
if bytes.len() != ALPHABET_SIZE {
return Err(ParseAlphabetError::InvalidLength);
}
{
let mut index = 0;
while index < ALPHABET_SIZE {
let byte = bytes[index];
// must be ascii printable. 127 (DEL) is commonly considered printable
// for some reason but clearly unsuitable for base64.
if !(byte >= 32_u8 && byte <= 126_u8) {
return Err(ParseAlphabetError::UnprintableByte(byte));
}
// = is assumed to be padding, so cannot be used as a symbol
if byte == PAD_BYTE {
return Err(ParseAlphabetError::ReservedByte(byte));
}
// Check for duplicates while staying within what const allows.
// It's n^2, but only over 64 hot bytes, and only once, so it's likely in the single digit
// microsecond range.
let mut probe_index = 0;
while probe_index < ALPHABET_SIZE {
if probe_index == index {
probe_index += 1;
continue;
}
let probe_byte = bytes[probe_index];
if byte == probe_byte {
return Err(ParseAlphabetError::DuplicatedByte(byte));
}
probe_index += 1;
}
index += 1;
}
}
Ok(Self::from_str_unchecked(alphabet))
}
/// Create a `&str` from the symbols in the `Alphabet`
pub fn as_str(&self) -> &str {
core::str::from_utf8(&self.symbols).unwrap()
}
}
impl convert::TryFrom<&str> for Alphabet {
type Error = ParseAlphabetError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
Self::new(value)
}
}
/// Possible errors when constructing an [Alphabet] from a `str`.
#[derive(Debug, Eq, PartialEq)]
pub enum ParseAlphabetError {
/// Alphabets must be 64 ASCII bytes
InvalidLength,
/// All bytes must be unique
DuplicatedByte(u8),
/// All bytes must be printable (in the range `[32, 126]`).
UnprintableByte(u8),
/// `=` cannot be used
ReservedByte(u8),
}
impl fmt::Display for ParseAlphabetError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::InvalidLength => write!(f, "Invalid length - must be 64 bytes"),
Self::DuplicatedByte(b) => write!(f, "Duplicated byte: {:#04x}", b),
Self::UnprintableByte(b) => write!(f, "Unprintable byte: {:#04x}", b),
Self::ReservedByte(b) => write!(f, "Reserved byte: {:#04x}", b),
}
}
}
#[cfg(any(feature = "std", test))]
impl error::Error for ParseAlphabetError {}
/// The standard alphabet (with `+` and `/`) specified in [RFC 4648][].
///
/// [RFC 4648]: https://datatracker.ietf.org/doc/html/rfc4648#section-4
pub const STANDARD: Alphabet = Alphabet::from_str_unchecked(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
);
/// The URL-safe alphabet (with `-` and `_`) specified in [RFC 4648][].
///
/// [RFC 4648]: https://datatracker.ietf.org/doc/html/rfc4648#section-5
pub const URL_SAFE: Alphabet = Alphabet::from_str_unchecked(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_",
);
/// The `crypt(3)` alphabet (with `.` and `/` as the _first_ two characters).
///
/// Not standardized, but folk wisdom on the net asserts that this alphabet is what crypt uses.
pub const CRYPT: Alphabet = Alphabet::from_str_unchecked(
"./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
);
/// The bcrypt alphabet.
pub const BCRYPT: Alphabet = Alphabet::from_str_unchecked(
"./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
);
/// The alphabet used in IMAP-modified UTF-7 (with `+` and `,`).
///
/// See [RFC 3501](https://tools.ietf.org/html/rfc3501#section-5.1.3)
pub const IMAP_MUTF7: Alphabet = Alphabet::from_str_unchecked(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,",
);
/// The alphabet used in BinHex 4.0 files.
///
/// See [BinHex 4.0 Definition](http://files.stairways.com/other/binhex-40-specs-info.txt)
pub const BIN_HEX: Alphabet = Alphabet::from_str_unchecked(
"!\"#$%&'()*+,-0123456789@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdehijklmpqr",
);
#[cfg(test)]
mod tests {
use crate::alphabet::*;
use core::convert::TryFrom as _;
#[test]
fn detects_duplicate_start() {
assert_eq!(
ParseAlphabetError::DuplicatedByte(b'A'),
Alphabet::new("AACDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
.unwrap_err()
);
}
#[test]
fn detects_duplicate_end() {
assert_eq!(
ParseAlphabetError::DuplicatedByte(b'/'),
Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789//")
.unwrap_err()
);
}
#[test]
fn detects_duplicate_middle() {
assert_eq!(
ParseAlphabetError::DuplicatedByte(b'Z'),
Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/")
.unwrap_err()
);
}
#[test]
fn detects_length() {
assert_eq!(
ParseAlphabetError::InvalidLength,
Alphabet::new(
"xxxxxxxxxABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/",
)
.unwrap_err()
);
}
#[test]
fn detects_padding() {
assert_eq!(
ParseAlphabetError::ReservedByte(b'='),
Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+=")
.unwrap_err()
);
}
#[test]
fn detects_unprintable() {
// form feed
assert_eq!(
ParseAlphabetError::UnprintableByte(0xc),
Alphabet::new("\x0cBCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
.unwrap_err()
);
}
#[test]
fn same_as_unchecked() {
assert_eq!(
STANDARD,
Alphabet::try_from("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
.unwrap()
);
}
#[test]
fn str_same_as_input() {
let alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
let a = Alphabet::try_from(alphabet).unwrap();
assert_eq!(alphabet, a.as_str())
}
}

View File

@ -1,172 +0,0 @@
use crate::{
encode::add_padding,
engine::{Config, Engine},
};
#[cfg(any(feature = "alloc", test))]
use alloc::string::String;
#[cfg(any(feature = "alloc", test))]
use core::str;
/// The output mechanism for ChunkedEncoder's encoded bytes.
pub trait Sink {
type Error;
/// Handle a chunk of encoded base64 data (as UTF-8 bytes)
fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error>;
}
/// A base64 encoder that emits encoded bytes in chunks without heap allocation.
pub struct ChunkedEncoder<'e, E: Engine + ?Sized> {
engine: &'e E,
}
impl<'e, E: Engine + ?Sized> ChunkedEncoder<'e, E> {
pub fn new(engine: &'e E) -> ChunkedEncoder<'e, E> {
ChunkedEncoder { engine }
}
pub fn encode<S: Sink>(&self, bytes: &[u8], sink: &mut S) -> Result<(), S::Error> {
const BUF_SIZE: usize = 1024;
const CHUNK_SIZE: usize = BUF_SIZE / 4 * 3;
let mut buf = [0; BUF_SIZE];
for chunk in bytes.chunks(CHUNK_SIZE) {
let mut len = self.engine.internal_encode(chunk, &mut buf);
if chunk.len() != CHUNK_SIZE && self.engine.config().encode_padding() {
// Final, potentially partial, chunk.
// Only need to consider if padding is needed on a partial chunk since full chunk
// is a multiple of 3, which therefore won't be padded.
// Pad output to multiple of four bytes if required by config.
len += add_padding(len, &mut buf[len..]);
}
sink.write_encoded_bytes(&buf[..len])?;
}
Ok(())
}
}
// A really simple sink that just appends to a string
#[cfg(any(feature = "alloc", test))]
pub(crate) struct StringSink<'a> {
string: &'a mut String,
}
#[cfg(any(feature = "alloc", test))]
impl<'a> StringSink<'a> {
pub(crate) fn new(s: &mut String) -> StringSink {
StringSink { string: s }
}
}
#[cfg(any(feature = "alloc", test))]
impl<'a> Sink for StringSink<'a> {
type Error = ();
fn write_encoded_bytes(&mut self, s: &[u8]) -> Result<(), Self::Error> {
self.string.push_str(str::from_utf8(s).unwrap());
Ok(())
}
}
#[cfg(test)]
pub mod tests {
use rand::{
distributions::{Distribution, Uniform},
Rng, SeedableRng,
};
use crate::{
alphabet::STANDARD,
engine::general_purpose::{GeneralPurpose, GeneralPurposeConfig, PAD},
tests::random_engine,
};
use super::*;
#[test]
fn chunked_encode_empty() {
assert_eq!("", chunked_encode_str(&[], PAD));
}
#[test]
fn chunked_encode_intermediate_fast_loop() {
// > 8 bytes input, will enter the pretty fast loop
assert_eq!("Zm9vYmFyYmF6cXV4", chunked_encode_str(b"foobarbazqux", PAD));
}
#[test]
fn chunked_encode_fast_loop() {
// > 32 bytes input, will enter the uber fast loop
assert_eq!(
"Zm9vYmFyYmF6cXV4cXV1eGNvcmdlZ3JhdWx0Z2FycGx5eg==",
chunked_encode_str(b"foobarbazquxquuxcorgegraultgarplyz", PAD)
);
}
#[test]
fn chunked_encode_slow_loop_only() {
// < 8 bytes input, slow loop only
assert_eq!("Zm9vYmFy", chunked_encode_str(b"foobar", PAD));
}
#[test]
fn chunked_encode_matches_normal_encode_random_string_sink() {
let helper = StringSinkTestHelper;
chunked_encode_matches_normal_encode_random(&helper);
}
pub fn chunked_encode_matches_normal_encode_random<S: SinkTestHelper>(sink_test_helper: &S) {
let mut input_buf: Vec<u8> = Vec::new();
let mut output_buf = String::new();
let mut rng = rand::rngs::SmallRng::from_entropy();
let input_len_range = Uniform::new(1, 10_000);
for _ in 0..20_000 {
input_buf.clear();
output_buf.clear();
let buf_len = input_len_range.sample(&mut rng);
for _ in 0..buf_len {
input_buf.push(rng.gen());
}
let engine = random_engine(&mut rng);
let chunk_encoded_string = sink_test_helper.encode_to_string(&engine, &input_buf);
engine.encode_string(&input_buf, &mut output_buf);
assert_eq!(output_buf, chunk_encoded_string, "input len={}", buf_len);
}
}
fn chunked_encode_str(bytes: &[u8], config: GeneralPurposeConfig) -> String {
let mut s = String::new();
let mut sink = StringSink::new(&mut s);
let engine = GeneralPurpose::new(&STANDARD, config);
let encoder = ChunkedEncoder::new(&engine);
encoder.encode(bytes, &mut sink).unwrap();
s
}
// An abstraction around sinks so that we can have tests that apply easily to any sink implementation
pub trait SinkTestHelper {
fn encode_to_string<E: Engine>(&self, engine: &E, bytes: &[u8]) -> String;
}
struct StringSinkTestHelper;
impl SinkTestHelper for StringSinkTestHelper {
fn encode_to_string<E: Engine>(&self, engine: &E, bytes: &[u8]) -> String {
let encoder = ChunkedEncoder::new(engine);
let mut s = String::new();
let mut sink = StringSink::new(&mut s);
encoder.encode(bytes, &mut sink).unwrap();
s
}
}
}

View File

@ -1,340 +0,0 @@
use crate::engine::{general_purpose::STANDARD, DecodeEstimate, Engine};
#[cfg(any(feature = "alloc", test))]
use alloc::vec::Vec;
use core::fmt;
#[cfg(any(feature = "std", test))]
use std::error;
/// Errors that can occur while decoding.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum DecodeError {
/// An invalid byte was found in the input. The offset and offending byte are provided.
/// Padding characters (`=`) interspersed in the encoded form will be treated as invalid bytes.
InvalidByte(usize, u8),
/// The length of the input is invalid.
/// A typical cause of this is stray trailing whitespace or other separator bytes.
/// In the case where excess trailing bytes have produced an invalid length *and* the last byte
/// is also an invalid base64 symbol (as would be the case for whitespace, etc), `InvalidByte`
/// will be emitted instead of `InvalidLength` to make the issue easier to debug.
InvalidLength,
/// The last non-padding input symbol's encoded 6 bits have nonzero bits that will be discarded.
/// This is indicative of corrupted or truncated Base64.
/// Unlike `InvalidByte`, which reports symbols that aren't in the alphabet, this error is for
/// symbols that are in the alphabet but represent nonsensical encodings.
InvalidLastSymbol(usize, u8),
/// The nature of the padding was not as configured: absent or incorrect when it must be
/// canonical, or present when it must be absent, etc.
InvalidPadding,
}
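// Hedged illustration of how callers can branch on these variants via the public
// `Engine::decode` API; the expected offset and byte follow the `InvalidByte`
// contract described above.
#[cfg(test)]
mod decode_error_illustration {
    use super::DecodeError;
    use crate::engine::{general_purpose::STANDARD, Engine};
    #[test]
    fn invalid_symbol_reports_offset_and_byte() {
        // '%' is not in the standard alphabet, so the failure carries its
        // position and value rather than a generic error.
        match STANDARD.decode("AAA%") {
            Err(DecodeError::InvalidByte(offset, byte)) => {
                assert_eq!(3, offset);
                assert_eq!(b'%', byte);
            }
            other => panic!("expected InvalidByte, got {:?}", other),
        }
    }
}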
impl fmt::Display for DecodeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Self::InvalidByte(index, byte) => write!(f, "Invalid byte {}, offset {}.", byte, index),
Self::InvalidLength => write!(f, "Encoded text cannot have a 6-bit remainder."),
Self::InvalidLastSymbol(index, byte) => {
write!(f, "Invalid last symbol {}, offset {}.", byte, index)
}
Self::InvalidPadding => write!(f, "Invalid padding"),
}
}
}
#[cfg(any(feature = "std", test))]
impl error::Error for DecodeError {}
/// Errors that can occur while decoding into a slice.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum DecodeSliceError {
/// A [DecodeError] occurred
DecodeError(DecodeError),
/// The provided slice _may_ be too small.
///
/// The check is conservative (assumes the last triplet of output bytes will all be needed).
OutputSliceTooSmall,
}
impl fmt::Display for DecodeSliceError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::DecodeError(e) => write!(f, "DecodeError: {}", e),
Self::OutputSliceTooSmall => write!(f, "Output slice too small"),
}
}
}
#[cfg(any(feature = "std", test))]
impl error::Error for DecodeSliceError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
DecodeSliceError::DecodeError(e) => Some(e),
DecodeSliceError::OutputSliceTooSmall => None,
}
}
}
impl From<DecodeError> for DecodeSliceError {
fn from(e: DecodeError) -> Self {
DecodeSliceError::DecodeError(e)
}
}
/// Decode base64 using the [`STANDARD` engine](STANDARD).
///
/// See [Engine::decode].
#[deprecated(since = "0.21.0", note = "Use Engine::decode")]
#[cfg(any(feature = "alloc", test))]
pub fn decode<T: AsRef<[u8]>>(input: T) -> Result<Vec<u8>, DecodeError> {
STANDARD.decode(input)
}
/// Decode from string reference as octets using the specified [Engine].
///
/// See [Engine::decode].
/// Returns a `Result` containing a `Vec<u8>`.
#[deprecated(since = "0.21.0", note = "Use Engine::decode")]
#[cfg(any(feature = "alloc", test))]
pub fn decode_engine<E: Engine, T: AsRef<[u8]>>(
input: T,
engine: &E,
) -> Result<Vec<u8>, DecodeError> {
engine.decode(input)
}
/// Decode from string reference as octets.
///
/// See [Engine::decode_vec].
#[cfg(any(feature = "alloc", test))]
#[deprecated(since = "0.21.0", note = "Use Engine::decode_vec")]
pub fn decode_engine_vec<E: Engine, T: AsRef<[u8]>>(
input: T,
buffer: &mut Vec<u8>,
engine: &E,
) -> Result<(), DecodeError> {
engine.decode_vec(input, buffer)
}
/// Decode the input into the provided output slice.
///
/// See [Engine::decode_slice].
#[deprecated(since = "0.21.0", note = "Use Engine::decode_slice")]
pub fn decode_engine_slice<E: Engine, T: AsRef<[u8]>>(
input: T,
output: &mut [u8],
engine: &E,
) -> Result<usize, DecodeSliceError> {
engine.decode_slice(input, output)
}
/// Returns a conservative estimate of the decoded size of `encoded_len` base64 symbols (rounded up
/// to the next group of 3 decoded bytes).
///
/// The resulting length will be a safe choice for the size of a decode buffer, but may have up to
/// 2 trailing bytes that won't end up being needed.
///
/// # Examples
///
/// ```
/// use base64::decoded_len_estimate;
///
/// assert_eq!(3, decoded_len_estimate(1));
/// assert_eq!(3, decoded_len_estimate(2));
/// assert_eq!(3, decoded_len_estimate(3));
/// assert_eq!(3, decoded_len_estimate(4));
/// // start of the next quad of encoded symbols
/// assert_eq!(6, decoded_len_estimate(5));
/// ```
pub fn decoded_len_estimate(encoded_len: usize) -> usize {
STANDARD
.internal_decoded_len_estimate(encoded_len)
.decoded_len_estimate()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
alphabet,
engine::{general_purpose, Config, GeneralPurpose},
tests::{assert_encode_sanity, random_engine},
};
use rand::{
distributions::{Distribution, Uniform},
Rng, SeedableRng,
};
#[test]
fn decode_into_nonempty_vec_doesnt_clobber_existing_prefix() {
let mut orig_data = Vec::new();
let mut encoded_data = String::new();
let mut decoded_with_prefix = Vec::new();
let mut decoded_without_prefix = Vec::new();
let mut prefix = Vec::new();
let prefix_len_range = Uniform::new(0, 1000);
let input_len_range = Uniform::new(0, 1000);
let mut rng = rand::rngs::SmallRng::from_entropy();
for _ in 0..10_000 {
orig_data.clear();
encoded_data.clear();
decoded_with_prefix.clear();
decoded_without_prefix.clear();
prefix.clear();
let input_len = input_len_range.sample(&mut rng);
for _ in 0..input_len {
orig_data.push(rng.gen());
}
let engine = random_engine(&mut rng);
engine.encode_string(&orig_data, &mut encoded_data);
assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len);
let prefix_len = prefix_len_range.sample(&mut rng);
// fill the buf with a prefix
for _ in 0..prefix_len {
prefix.push(rng.gen());
}
decoded_with_prefix.resize(prefix_len, 0);
decoded_with_prefix.copy_from_slice(&prefix);
// decode into the non-empty buf
engine
.decode_vec(&encoded_data, &mut decoded_with_prefix)
.unwrap();
// also decode into the empty buf
engine
.decode_vec(&encoded_data, &mut decoded_without_prefix)
.unwrap();
assert_eq!(
prefix_len + decoded_without_prefix.len(),
decoded_with_prefix.len()
);
assert_eq!(orig_data, decoded_without_prefix);
// append plain decode onto prefix
prefix.append(&mut decoded_without_prefix);
assert_eq!(prefix, decoded_with_prefix);
}
}
#[test]
fn decode_slice_doesnt_clobber_existing_prefix_or_suffix() {
do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| {
e.decode_slice(input, output).unwrap()
})
}
#[test]
fn decode_slice_unchecked_doesnt_clobber_existing_prefix_or_suffix() {
do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| {
e.decode_slice_unchecked(input, output).unwrap()
})
}
#[test]
fn decode_engine_estimation_works_for_various_lengths() {
let engine = GeneralPurpose::new(&alphabet::STANDARD, general_purpose::NO_PAD);
for num_prefix_quads in 0..100 {
for suffix in &["AA", "AAA", "AAAA"] {
let mut prefix = "AAAA".repeat(num_prefix_quads);
prefix.push_str(suffix);
// make sure no overflow (and thus a panic) occurs
let res = engine.decode(prefix);
assert!(res.is_ok());
}
}
}
#[test]
fn decode_slice_output_length_errors() {
for num_quads in 1..100 {
let input = "AAAA".repeat(num_quads);
let mut vec = vec![0; (num_quads - 1) * 3];
assert_eq!(
DecodeSliceError::OutputSliceTooSmall,
STANDARD.decode_slice(&input, &mut vec).unwrap_err()
);
vec.push(0);
assert_eq!(
DecodeSliceError::OutputSliceTooSmall,
STANDARD.decode_slice(&input, &mut vec).unwrap_err()
);
vec.push(0);
assert_eq!(
DecodeSliceError::OutputSliceTooSmall,
STANDARD.decode_slice(&input, &mut vec).unwrap_err()
);
vec.push(0);
// now it works
assert_eq!(
num_quads * 3,
STANDARD.decode_slice(&input, &mut vec).unwrap()
);
}
}
fn do_decode_slice_doesnt_clobber_existing_prefix_or_suffix<
F: Fn(&GeneralPurpose, &[u8], &mut [u8]) -> usize,
>(
call_decode: F,
) {
let mut orig_data = Vec::new();
let mut encoded_data = String::new();
let mut decode_buf = Vec::new();
let mut decode_buf_copy: Vec<u8> = Vec::new();
let input_len_range = Uniform::new(0, 1000);
let mut rng = rand::rngs::SmallRng::from_entropy();
for _ in 0..10_000 {
orig_data.clear();
encoded_data.clear();
decode_buf.clear();
decode_buf_copy.clear();
let input_len = input_len_range.sample(&mut rng);
for _ in 0..input_len {
orig_data.push(rng.gen());
}
let engine = random_engine(&mut rng);
engine.encode_string(&orig_data, &mut encoded_data);
assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len);
// fill the buffer with random garbage, long enough to have some room before and after
for _ in 0..5000 {
decode_buf.push(rng.gen());
}
// keep a copy for later comparison
decode_buf_copy.extend(decode_buf.iter());
let offset = 1000;
// decode into the non-empty buf
let decode_bytes_written =
call_decode(&engine, encoded_data.as_bytes(), &mut decode_buf[offset..]);
assert_eq!(orig_data.len(), decode_bytes_written);
assert_eq!(
orig_data,
&decode_buf[offset..(offset + decode_bytes_written)]
);
assert_eq!(&decode_buf_copy[0..offset], &decode_buf[0..offset]);
assert_eq!(
&decode_buf_copy[offset + decode_bytes_written..],
&decode_buf[offset + decode_bytes_written..]
);
}
}
}

View File

@ -1,88 +0,0 @@
//! Enables base64'd output anywhere you might use a `Display` implementation, like a format string.
//!
//! ```
//! use base64::{display::Base64Display, engine::general_purpose::STANDARD};
//!
//! let data = vec![0x0, 0x1, 0x2, 0x3];
//! let wrapper = Base64Display::new(&data, &STANDARD);
//!
//! assert_eq!("base64: AAECAw==", format!("base64: {}", wrapper));
//! ```
use super::chunked_encoder::ChunkedEncoder;
use crate::engine::Engine;
use core::fmt::{Display, Formatter};
use core::{fmt, str};
/// A convenience wrapper for base64'ing bytes into a format string without heap allocation.
pub struct Base64Display<'a, 'e, E: Engine> {
bytes: &'a [u8],
chunked_encoder: ChunkedEncoder<'e, E>,
}
impl<'a, 'e, E: Engine> Base64Display<'a, 'e, E> {
/// Create a `Base64Display` with the provided engine.
pub fn new(bytes: &'a [u8], engine: &'e E) -> Base64Display<'a, 'e, E> {
Base64Display {
bytes,
chunked_encoder: ChunkedEncoder::new(engine),
}
}
}
impl<'a, 'e, E: Engine> Display for Base64Display<'a, 'e, E> {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
let mut sink = FormatterSink { f: formatter };
self.chunked_encoder.encode(self.bytes, &mut sink)
}
}
struct FormatterSink<'a, 'b: 'a> {
f: &'a mut Formatter<'b>,
}
impl<'a, 'b: 'a> super::chunked_encoder::Sink for FormatterSink<'a, 'b> {
type Error = fmt::Error;
fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error> {
// Avoid unsafe. If max performance is needed, write your own display wrapper that uses
// unsafe here to gain about 10-15%.
self.f
.write_str(str::from_utf8(encoded).expect("base64 data was not utf8"))
}
}
#[cfg(test)]
mod tests {
use super::super::chunked_encoder::tests::{
chunked_encode_matches_normal_encode_random, SinkTestHelper,
};
use super::*;
use crate::engine::general_purpose::STANDARD;
#[test]
fn basic_display() {
assert_eq!(
"~$Zm9vYmFy#*",
format!("~${}#*", Base64Display::new(b"foobar", &STANDARD))
);
assert_eq!(
"~$Zm9vYmFyZg==#*",
format!("~${}#*", Base64Display::new(b"foobarf", &STANDARD))
);
}
#[test]
fn display_encode_matches_normal_encode() {
let helper = DisplaySinkTestHelper;
chunked_encode_matches_normal_encode_random(&helper);
}
struct DisplaySinkTestHelper;
impl SinkTestHelper for DisplaySinkTestHelper {
fn encode_to_string<E: Engine>(&self, engine: &E, bytes: &[u8]) -> String {
format!("{}", Base64Display::new(bytes, engine))
}
}
}

View File

@ -1,492 +0,0 @@
#[cfg(any(feature = "alloc", test))]
use alloc::string::String;
use core::fmt;
#[cfg(any(feature = "std", test))]
use std::error;
#[cfg(any(feature = "alloc", test))]
use crate::engine::general_purpose::STANDARD;
use crate::engine::{Config, Engine};
use crate::PAD_BYTE;
/// Encode arbitrary octets as base64 using the [`STANDARD` engine](STANDARD).
///
/// See [Engine::encode].
#[allow(unused)]
#[deprecated(since = "0.21.0", note = "Use Engine::encode")]
#[cfg(any(feature = "alloc", test))]
pub fn encode<T: AsRef<[u8]>>(input: T) -> String {
STANDARD.encode(input)
}
/// Encode arbitrary octets as base64 using the provided `Engine` into a new `String`.
///
/// See [Engine::encode].
#[allow(unused)]
#[deprecated(since = "0.21.0", note = "Use Engine::encode")]
#[cfg(any(feature = "alloc", test))]
pub fn encode_engine<E: Engine, T: AsRef<[u8]>>(input: T, engine: &E) -> String {
engine.encode(input)
}
/// Encode arbitrary octets as base64 into a supplied `String`.
///
/// See [Engine::encode_string].
#[allow(unused)]
#[deprecated(since = "0.21.0", note = "Use Engine::encode_string")]
#[cfg(any(feature = "alloc", test))]
pub fn encode_engine_string<E: Engine, T: AsRef<[u8]>>(
input: T,
output_buf: &mut String,
engine: &E,
) {
engine.encode_string(input, output_buf)
}
/// Encode arbitrary octets as base64 into a supplied slice.
///
/// See [Engine::encode_slice].
#[allow(unused)]
#[deprecated(since = "0.21.0", note = "Use Engine::encode_slice")]
pub fn encode_engine_slice<E: Engine, T: AsRef<[u8]>>(
input: T,
output_buf: &mut [u8],
engine: &E,
) -> Result<usize, EncodeSliceError> {
engine.encode_slice(input, output_buf)
}
/// B64-encode and pad (if configured).
///
/// This helper exists to avoid recalculating encoded_size, which is relatively expensive on short
/// inputs.
///
/// `encoded_size` is the encoded size calculated for `input`.
///
/// `output` must be of size `encoded_size`.
///
/// All bytes in `output` will be written to since it is exactly the size of the output.
pub(crate) fn encode_with_padding<E: Engine + ?Sized>(
input: &[u8],
output: &mut [u8],
engine: &E,
expected_encoded_size: usize,
) {
debug_assert_eq!(expected_encoded_size, output.len());
let b64_bytes_written = engine.internal_encode(input, output);
let padding_bytes = if engine.config().encode_padding() {
add_padding(b64_bytes_written, &mut output[b64_bytes_written..])
} else {
0
};
let encoded_bytes = b64_bytes_written
.checked_add(padding_bytes)
.expect("usize overflow when calculating b64 length");
debug_assert_eq!(expected_encoded_size, encoded_bytes);
}
/// Calculate the base64 encoded length for a given input length, optionally including any
/// appropriate padding bytes.
///
/// Returns `None` if the encoded length can't be represented in `usize`. This will happen for
/// input lengths in approximately the top quarter of the range of `usize`.
pub const fn encoded_len(bytes_len: usize, padding: bool) -> Option<usize> {
let rem = bytes_len % 3;
let complete_input_chunks = bytes_len / 3;
// `?` is disallowed in const, and `let Some(_) = _ else` requires 1.65.0, whereas this
// messier syntax works on 1.48
let complete_chunk_output =
if let Some(complete_chunk_output) = complete_input_chunks.checked_mul(4) {
complete_chunk_output
} else {
return None;
};
if rem > 0 {
if padding {
complete_chunk_output.checked_add(4)
} else {
let encoded_rem = match rem {
1 => 2,
// only other possible remainder is 2
// can't use a separate _ => unreachable!() in const fns in ancient rust versions
_ => 3,
};
complete_chunk_output.checked_add(encoded_rem)
}
} else {
Some(complete_chunk_output)
}
}
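// Hedged worked example of the length formula above; the values match the
// `assert_encoded_length` cases in the tests at the bottom of this file.
#[cfg(test)]
mod encoded_len_illustration {
    use super::encoded_len;
    #[test]
    fn padded_vs_unpadded_lengths() {
        // 3 input bytes fill one quad exactly; padding changes nothing.
        assert_eq!(Some(4), encoded_len(3, true));
        assert_eq!(Some(4), encoded_len(3, false));
        // 4 input bytes leave a 1-byte remainder: a full padded quad (+4)
        // versus just the 2 extra symbols when padding is off.
        assert_eq!(Some(8), encoded_len(4, true));
        assert_eq!(Some(6), encoded_len(4, false));
    }
}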
/// Write padding characters.
/// `unpadded_output_len` is the size of the unpadded but base64 encoded data.
/// `output` is the slice where padding should be written, of length at least 2.
///
/// Returns the number of padding bytes written.
pub(crate) fn add_padding(unpadded_output_len: usize, output: &mut [u8]) -> usize {
let pad_bytes = (4 - (unpadded_output_len % 4)) % 4;
// for just a couple bytes, this has better performance than using
// .fill(), or iterating over mutable refs, which call memset()
#[allow(clippy::needless_range_loop)]
for i in 0..pad_bytes {
output[i] = PAD_BYTE;
}
pad_bytes
}
/// Errors that can occur while encoding into a slice.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum EncodeSliceError {
/// The provided slice is too small.
OutputSliceTooSmall,
}
impl fmt::Display for EncodeSliceError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::OutputSliceTooSmall => write!(f, "Output slice too small"),
}
}
}
#[cfg(any(feature = "std", test))]
impl error::Error for EncodeSliceError {}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
alphabet,
engine::general_purpose::{GeneralPurpose, NO_PAD, STANDARD},
tests::{assert_encode_sanity, random_config, random_engine},
};
use rand::{
distributions::{Distribution, Uniform},
Rng, SeedableRng,
};
use std::str;
const URL_SAFE_NO_PAD_ENGINE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD);
#[test]
fn encoded_size_correct_standard() {
assert_encoded_length(0, 0, &STANDARD, true);
assert_encoded_length(1, 4, &STANDARD, true);
assert_encoded_length(2, 4, &STANDARD, true);
assert_encoded_length(3, 4, &STANDARD, true);
assert_encoded_length(4, 8, &STANDARD, true);
assert_encoded_length(5, 8, &STANDARD, true);
assert_encoded_length(6, 8, &STANDARD, true);
assert_encoded_length(7, 12, &STANDARD, true);
assert_encoded_length(8, 12, &STANDARD, true);
assert_encoded_length(9, 12, &STANDARD, true);
assert_encoded_length(54, 72, &STANDARD, true);
assert_encoded_length(55, 76, &STANDARD, true);
assert_encoded_length(56, 76, &STANDARD, true);
assert_encoded_length(57, 76, &STANDARD, true);
assert_encoded_length(58, 80, &STANDARD, true);
}
#[test]
fn encoded_size_correct_no_pad() {
assert_encoded_length(0, 0, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(1, 2, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(2, 3, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(3, 4, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(4, 6, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(5, 7, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(6, 8, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(7, 10, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(8, 11, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(9, 12, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(54, 72, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(55, 74, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(56, 75, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(57, 76, &URL_SAFE_NO_PAD_ENGINE, false);
assert_encoded_length(58, 78, &URL_SAFE_NO_PAD_ENGINE, false);
}
#[test]
fn encoded_size_overflow() {
assert_eq!(None, encoded_len(usize::MAX, true));
}
#[test]
fn encode_engine_string_into_nonempty_buffer_doesnt_clobber_prefix() {
let mut orig_data = Vec::new();
let mut prefix = String::new();
let mut encoded_data_no_prefix = String::new();
let mut encoded_data_with_prefix = String::new();
let mut decoded = Vec::new();
let prefix_len_range = Uniform::new(0, 1000);
let input_len_range = Uniform::new(0, 1000);
let mut rng = rand::rngs::SmallRng::from_entropy();
for _ in 0..10_000 {
orig_data.clear();
prefix.clear();
encoded_data_no_prefix.clear();
encoded_data_with_prefix.clear();
decoded.clear();
let input_len = input_len_range.sample(&mut rng);
for _ in 0..input_len {
orig_data.push(rng.gen());
}
let prefix_len = prefix_len_range.sample(&mut rng);
for _ in 0..prefix_len {
// getting convenient random single-byte printable chars that aren't base64 is
// annoying
prefix.push('#');
}
encoded_data_with_prefix.push_str(&prefix);
let engine = random_engine(&mut rng);
engine.encode_string(&orig_data, &mut encoded_data_no_prefix);
engine.encode_string(&orig_data, &mut encoded_data_with_prefix);
assert_eq!(
encoded_data_no_prefix.len() + prefix_len,
encoded_data_with_prefix.len()
);
assert_encode_sanity(
&encoded_data_no_prefix,
engine.config().encode_padding(),
input_len,
);
assert_encode_sanity(
&encoded_data_with_prefix[prefix_len..],
engine.config().encode_padding(),
input_len,
);
// append plain encode onto prefix
prefix.push_str(&encoded_data_no_prefix);
assert_eq!(prefix, encoded_data_with_prefix);
engine
.decode_vec(&encoded_data_no_prefix, &mut decoded)
.unwrap();
assert_eq!(orig_data, decoded);
}
}
#[test]
fn encode_engine_slice_into_nonempty_buffer_doesnt_clobber_suffix() {
let mut orig_data = Vec::new();
let mut encoded_data = Vec::new();
let mut encoded_data_original_state = Vec::new();
let mut decoded = Vec::new();
let input_len_range = Uniform::new(0, 1000);
let mut rng = rand::rngs::SmallRng::from_entropy();
for _ in 0..10_000 {
orig_data.clear();
encoded_data.clear();
encoded_data_original_state.clear();
decoded.clear();
let input_len = input_len_range.sample(&mut rng);
for _ in 0..input_len {
orig_data.push(rng.gen());
}
// plenty of existing garbage in the encoded buffer
for _ in 0..10 * input_len {
encoded_data.push(rng.gen());
}
encoded_data_original_state.extend_from_slice(&encoded_data);
let engine = random_engine(&mut rng);
let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap();
assert_eq!(
encoded_size,
engine.encode_slice(&orig_data, &mut encoded_data).unwrap()
);
assert_encode_sanity(
str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
engine.config().encode_padding(),
input_len,
);
assert_eq!(
&encoded_data[encoded_size..],
&encoded_data_original_state[encoded_size..]
);
engine
.decode_vec(&encoded_data[0..encoded_size], &mut decoded)
.unwrap();
assert_eq!(orig_data, decoded);
}
}
#[test]
fn encode_to_slice_random_valid_utf8() {
let mut input = Vec::new();
let mut output = Vec::new();
let input_len_range = Uniform::new(0, 1000);
let mut rng = rand::rngs::SmallRng::from_entropy();
for _ in 0..10_000 {
input.clear();
output.clear();
let input_len = input_len_range.sample(&mut rng);
for _ in 0..input_len {
input.push(rng.gen());
}
let config = random_config(&mut rng);
let engine = random_engine(&mut rng);
// fill up the output buffer with garbage
let encoded_size = encoded_len(input_len, config.encode_padding()).unwrap();
for _ in 0..encoded_size {
output.push(rng.gen());
}
let orig_output_buf = output.clone();
let bytes_written = engine.internal_encode(&input, &mut output);
// make sure the part beyond bytes_written is the same garbage it was before
assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);
// make sure the encoded bytes are UTF-8
let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
}
}
#[test]
fn encode_with_padding_random_valid_utf8() {
let mut input = Vec::new();
let mut output = Vec::new();
let input_len_range = Uniform::new(0, 1000);
let mut rng = rand::rngs::SmallRng::from_entropy();
for _ in 0..10_000 {
input.clear();
output.clear();
let input_len = input_len_range.sample(&mut rng);
for _ in 0..input_len {
input.push(rng.gen());
}
let engine = random_engine(&mut rng);
// fill up the output buffer with garbage
let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap();
for _ in 0..encoded_size + 1000 {
output.push(rng.gen());
}
let orig_output_buf = output.clone();
encode_with_padding(&input, &mut output[0..encoded_size], &engine, encoded_size);
// make sure the part beyond b64 is the same garbage it was before
assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]);
// make sure the encoded bytes are UTF-8
let _ = str::from_utf8(&output[0..encoded_size]).unwrap();
}
}
#[test]
fn add_padding_random_valid_utf8() {
let mut output = Vec::new();
let mut rng = rand::rngs::SmallRng::from_entropy();
// cover our bases for length % 4
for unpadded_output_len in 0..20 {
output.clear();
// fill output with random
for _ in 0..100 {
output.push(rng.gen());
}
let orig_output_buf = output.clone();
let bytes_written = add_padding(unpadded_output_len, &mut output);
// make sure the part beyond bytes_written is the same garbage it was before
assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);
// make sure the encoded bytes are UTF-8
let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
}
}
fn assert_encoded_length<E: Engine>(
input_len: usize,
enc_len: usize,
engine: &E,
padded: bool,
) {
assert_eq!(enc_len, encoded_len(input_len, padded).unwrap());
let mut bytes: Vec<u8> = Vec::new();
let mut rng = rand::rngs::SmallRng::from_entropy();
for _ in 0..input_len {
bytes.push(rng.gen());
}
let encoded = engine.encode(&bytes);
assert_encode_sanity(&encoded, padded, input_len);
assert_eq!(enc_len, encoded.len());
}
#[test]
fn encode_imap() {
assert_eq!(
&GeneralPurpose::new(&alphabet::IMAP_MUTF7, NO_PAD).encode(b"\xFB\xFF"),
&GeneralPurpose::new(&alphabet::STANDARD, NO_PAD)
.encode(b"\xFB\xFF")
.replace('/', ",")
);
}
}

View File

@ -1,383 +0,0 @@
use crate::{
engine::{general_purpose::INVALID_VALUE, DecodeEstimate, DecodeMetadata, DecodePaddingMode},
DecodeError, PAD_BYTE,
};
// decode logic operates on chunks of 8 input bytes without padding
const INPUT_CHUNK_LEN: usize = 8;
const DECODED_CHUNK_LEN: usize = 6;
// we read a u64 and write a u64, but a u64 of input only yields 6 bytes of output, so the last
// 2 bytes of any output u64 should not be counted as written to (but must be available in a
// slice).
const DECODED_CHUNK_SUFFIX: usize = 2;
// how many u64's of input to handle at a time
const CHUNKS_PER_FAST_LOOP_BLOCK: usize = 4;
const INPUT_BLOCK_LEN: usize = CHUNKS_PER_FAST_LOOP_BLOCK * INPUT_CHUNK_LEN;
// includes the trailing 2 bytes for the final u64 write
const DECODED_BLOCK_LEN: usize =
CHUNKS_PER_FAST_LOOP_BLOCK * DECODED_CHUNK_LEN + DECODED_CHUNK_SUFFIX;
#[doc(hidden)]
pub struct GeneralPurposeEstimate {
/// Total number of decode chunks, including a possibly partial last chunk
num_chunks: usize,
decoded_len_estimate: usize,
}
impl GeneralPurposeEstimate {
pub(crate) fn new(encoded_len: usize) -> Self {
// Formulas that won't overflow
Self {
num_chunks: encoded_len / INPUT_CHUNK_LEN
+ (encoded_len % INPUT_CHUNK_LEN > 0) as usize,
decoded_len_estimate: (encoded_len / 4 + (encoded_len % 4 > 0) as usize) * 3,
}
}
}
impl DecodeEstimate for GeneralPurposeEstimate {
fn decoded_len_estimate(&self) -> usize {
self.decoded_len_estimate
}
}
/// Helper to avoid duplicating num_chunks calculation, which is costly on short inputs.
/// Returns the decode metadata, or an error.
// We're on the fragile edge of compiler heuristics here. If this is not inlined, it's slow. If it's
// inline(always), it's slow in a different way. Plain ol' inline makes the benchmarks happiest at the
// moment, but this is fragile and the best setting changes with only minor code modifications.
#[inline]
pub(crate) fn decode_helper(
input: &[u8],
estimate: GeneralPurposeEstimate,
output: &mut [u8],
decode_table: &[u8; 256],
decode_allow_trailing_bits: bool,
padding_mode: DecodePaddingMode,
) -> Result<DecodeMetadata, DecodeError> {
let remainder_len = input.len() % INPUT_CHUNK_LEN;
// Because the fast decode loop writes in groups of 8 bytes (unrolled to
// CHUNKS_PER_FAST_LOOP_BLOCK times 8 bytes, where possible) and outputs 8 bytes at a time (of
// which only 6 are valid data), we need to be sure that we stop using the fast decode loop
// soon enough that there will always be 2 more bytes of valid data written after that loop.
let trailing_bytes_to_skip = match remainder_len {
// if input is a multiple of the chunk size, ignore the last chunk as it may have padding,
// and the fast decode logic cannot handle padding
0 => INPUT_CHUNK_LEN,
// 1 and 5 trailing bytes are illegal: can't decode 6 bits of input into a byte
1 | 5 => {
// trailing whitespace is so common that it's worth it to check the last byte to
// possibly return a better error message
if let Some(b) = input.last() {
if *b != PAD_BYTE && decode_table[*b as usize] == INVALID_VALUE {
return Err(DecodeError::InvalidByte(input.len() - 1, *b));
}
}
return Err(DecodeError::InvalidLength);
}
// This will decode to one output byte, which isn't enough to overwrite the 2 extra bytes
// written by the fast decode loop. So, we have to ignore both these 2 bytes and the
// previous chunk.
2 => INPUT_CHUNK_LEN + 2,
// If this is 3 un-padded chars, then it would actually decode to 2 bytes. However, if this
// is an erroneous 2 chars + 1 pad char that would decode to 1 byte, then it should fail
// with an error, not panic from going past the bounds of the output slice, so we let it
// use stage 3 + 4.
3 => INPUT_CHUNK_LEN + 3,
// This can also decode to one output byte because it may be 2 input chars + 2 padding
// chars, which would decode to 1 byte.
4 => INPUT_CHUNK_LEN + 4,
// Everything else is a legal decode len (given that we don't require padding), and will
// decode to at least 2 bytes of output.
_ => remainder_len,
};
// rounded up to include partial chunks
let mut remaining_chunks = estimate.num_chunks;
let mut input_index = 0;
let mut output_index = 0;
{
let length_of_fast_decode_chunks = input.len().saturating_sub(trailing_bytes_to_skip);
// Fast loop, stage 1
// manual unroll to CHUNKS_PER_FAST_LOOP_BLOCK of u64s to amortize slice bounds checks
if let Some(max_start_index) = length_of_fast_decode_chunks.checked_sub(INPUT_BLOCK_LEN) {
while input_index <= max_start_index {
let input_slice = &input[input_index..(input_index + INPUT_BLOCK_LEN)];
let output_slice = &mut output[output_index..(output_index + DECODED_BLOCK_LEN)];
decode_chunk(
&input_slice[0..],
input_index,
decode_table,
&mut output_slice[0..],
)?;
decode_chunk(
&input_slice[8..],
input_index + 8,
decode_table,
&mut output_slice[6..],
)?;
decode_chunk(
&input_slice[16..],
input_index + 16,
decode_table,
&mut output_slice[12..],
)?;
decode_chunk(
&input_slice[24..],
input_index + 24,
decode_table,
&mut output_slice[18..],
)?;
input_index += INPUT_BLOCK_LEN;
output_index += DECODED_BLOCK_LEN - DECODED_CHUNK_SUFFIX;
remaining_chunks -= CHUNKS_PER_FAST_LOOP_BLOCK;
}
}
// Fast loop, stage 2 (aka still pretty fast loop)
// 8 bytes at a time for whatever we didn't do in stage 1.
if let Some(max_start_index) = length_of_fast_decode_chunks.checked_sub(INPUT_CHUNK_LEN) {
while input_index < max_start_index {
decode_chunk(
&input[input_index..(input_index + INPUT_CHUNK_LEN)],
input_index,
decode_table,
&mut output
[output_index..(output_index + DECODED_CHUNK_LEN + DECODED_CHUNK_SUFFIX)],
)?;
output_index += DECODED_CHUNK_LEN;
input_index += INPUT_CHUNK_LEN;
remaining_chunks -= 1;
}
}
}
// Stage 3
// If input length was such that a chunk had to be deferred until after the fast loop
// because decoding it would have produced 2 trailing bytes that wouldn't then be
// overwritten, we decode that chunk here. This way is slower but doesn't write the 2
// trailing bytes.
// However, we still need to avoid the last chunk (partial or complete) because it could
// have padding, so we always do 1 fewer to avoid the last chunk.
for _ in 1..remaining_chunks {
decode_chunk_precise(
&input[input_index..],
input_index,
decode_table,
&mut output[output_index..(output_index + DECODED_CHUNK_LEN)],
)?;
input_index += INPUT_CHUNK_LEN;
output_index += DECODED_CHUNK_LEN;
}
// always have one more (possibly partial) block of 8 input
debug_assert!(input.len() - input_index > 1 || input.is_empty());
debug_assert!(input.len() - input_index <= 8);
super::decode_suffix::decode_suffix(
input,
input_index,
output,
output_index,
decode_table,
decode_allow_trailing_bits,
padding_mode,
)
}
/// Decode 8 bytes of input into 6 bytes of output. 8 bytes of output will be written, but only the
/// first 6 of those contain meaningful data.
///
/// `input` is the bytes to decode, of which the first 8 bytes will be processed.
/// `index_at_start_of_input` is the offset in the overall input (used for reporting errors
/// accurately)
/// `decode_table` is the lookup table for the particular base64 alphabet.
/// `output` will have its first 8 bytes overwritten, of which only the first 6 are valid decoded
/// data.
// yes, really inline (worth 30-50% speedup)
#[inline(always)]
fn decode_chunk(
input: &[u8],
index_at_start_of_input: usize,
decode_table: &[u8; 256],
output: &mut [u8],
) -> Result<(), DecodeError> {
let morsel = decode_table[input[0] as usize];
if morsel == INVALID_VALUE {
return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0]));
}
let mut accum = (morsel as u64) << 58;
let morsel = decode_table[input[1] as usize];
if morsel == INVALID_VALUE {
return Err(DecodeError::InvalidByte(
index_at_start_of_input + 1,
input[1],
));
}
accum |= (morsel as u64) << 52;
let morsel = decode_table[input[2] as usize];
if morsel == INVALID_VALUE {
return Err(DecodeError::InvalidByte(
index_at_start_of_input + 2,
input[2],
));
}
accum |= (morsel as u64) << 46;
let morsel = decode_table[input[3] as usize];
if morsel == INVALID_VALUE {
return Err(DecodeError::InvalidByte(
index_at_start_of_input + 3,
input[3],
));
}
accum |= (morsel as u64) << 40;
let morsel = decode_table[input[4] as usize];
if morsel == INVALID_VALUE {
return Err(DecodeError::InvalidByte(
index_at_start_of_input + 4,
input[4],
));
}
accum |= (morsel as u64) << 34;
let morsel = decode_table[input[5] as usize];
if morsel == INVALID_VALUE {
return Err(DecodeError::InvalidByte(
index_at_start_of_input + 5,
input[5],
));
}
accum |= (morsel as u64) << 28;
let morsel = decode_table[input[6] as usize];
if morsel == INVALID_VALUE {
return Err(DecodeError::InvalidByte(
index_at_start_of_input + 6,
input[6],
));
}
accum |= (morsel as u64) << 22;
let morsel = decode_table[input[7] as usize];
if morsel == INVALID_VALUE {
return Err(DecodeError::InvalidByte(
index_at_start_of_input + 7,
input[7],
));
}
accum |= (morsel as u64) << 16;
write_u64(output, accum);
Ok(())
}
/// Decode an 8-byte chunk, but only write the 6 bytes actually decoded instead of including 2
/// trailing garbage bytes.
#[inline]
fn decode_chunk_precise(
input: &[u8],
index_at_start_of_input: usize,
decode_table: &[u8; 256],
output: &mut [u8],
) -> Result<(), DecodeError> {
let mut tmp_buf = [0_u8; 8];
decode_chunk(
input,
index_at_start_of_input,
decode_table,
&mut tmp_buf[..],
)?;
output[0..6].copy_from_slice(&tmp_buf[0..6]);
Ok(())
}
#[inline]
fn write_u64(output: &mut [u8], value: u64) {
output[..8].copy_from_slice(&value.to_be_bytes());
}
#[cfg(test)]
mod tests {
use super::*;
use crate::engine::general_purpose::STANDARD;
#[test]
fn decode_chunk_precise_writes_only_6_bytes() {
let input = b"Zm9vYmFy"; // "foobar"
let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7];
decode_chunk_precise(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap();
assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 6, 7], &output);
}
#[test]
fn decode_chunk_writes_8_bytes() {
let input = b"Zm9vYmFy"; // "foobar"
let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7];
decode_chunk(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap();
assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 0, 0], &output);
}
#[test]
fn estimate_short_lengths() {
for (range, (num_chunks, decoded_len_estimate)) in [
(0..=0, (0, 0)),
(1..=4, (1, 3)),
(5..=8, (1, 6)),
(9..=12, (2, 9)),
(13..=16, (2, 12)),
(17..=20, (3, 15)),
] {
for encoded_len in range {
let estimate = GeneralPurposeEstimate::new(encoded_len);
assert_eq!(num_chunks, estimate.num_chunks);
assert_eq!(decoded_len_estimate, estimate.decoded_len_estimate);
}
}
}
#[test]
fn estimate_via_u128_inflation() {
// cover both ends of usize
(0..1000)
.chain(usize::MAX - 1000..=usize::MAX)
.for_each(|encoded_len| {
// inflate to 128 bit type to be able to safely use the easy formulas
let len_128 = encoded_len as u128;
let estimate = GeneralPurposeEstimate::new(encoded_len);
assert_eq!(
((len_128 + (INPUT_CHUNK_LEN - 1) as u128) / (INPUT_CHUNK_LEN as u128))
as usize,
estimate.num_chunks
);
assert_eq!(
((len_128 + 3) / 4 * 3) as usize,
estimate.decoded_len_estimate
);
})
}
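// A minimal sketch of the error path, assuming `DecodeError` values can be compared with
// `assert_eq!`: an out-of-alphabet byte is reported at the absolute index, i.e. offset by
// `index_at_start_of_input`.
#[test]
fn decode_chunk_reports_invalid_byte_at_absolute_index() {
let input = b"Zm9vYmF!"; // '!' is not in the standard alphabet
let mut output = [0_u8; 8];
assert_eq!(
Err(DecodeError::InvalidByte(100 + 7, b'!')),
decode_chunk(&input[..], 100, &STANDARD.decode_table, &mut output)
);
}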
}

@ -1,168 +0,0 @@
use crate::{
engine::{general_purpose::INVALID_VALUE, DecodeMetadata, DecodePaddingMode},
DecodeError, PAD_BYTE,
};
/// Decode the last 1-8 bytes, checking for trailing set bits and padding per the provided
/// parameters.
///
/// Returns the decode metadata representing the total number of bytes decoded, including the ones
/// indicated as already written by `output_index`.
pub(crate) fn decode_suffix(
input: &[u8],
input_index: usize,
output: &mut [u8],
mut output_index: usize,
decode_table: &[u8; 256],
decode_allow_trailing_bits: bool,
padding_mode: DecodePaddingMode,
) -> Result<DecodeMetadata, DecodeError> {
// Decode any leftovers that aren't a complete input block of 8 bytes.
// Use a u64 as a stack-resident 8 byte buffer.
let mut leftover_bits: u64 = 0;
let mut morsels_in_leftover = 0;
let mut padding_bytes = 0;
let mut first_padding_index: usize = 0;
let mut last_symbol = 0_u8;
let start_of_leftovers = input_index;
for (i, &b) in input[start_of_leftovers..].iter().enumerate() {
// '=' padding
if b == PAD_BYTE {
// There can be bad padding bytes in a few ways:
// 1 - Padding with non-padding characters after it
// 2 - Padding after zero or one characters in the current quad (should only
// be after 2 or 3 chars)
// 3 - More than two characters of padding. If 3 or 4 padding chars
// are in the same quad, that implies it will be caught by #2.
// If it spreads from one quad to another, it will be an invalid byte
// in the first quad.
// 4 - Non-canonical padding -- 1 byte when it should be 2, etc.
// Per config, non-canonical but still functional non- or partially-padded base64
// may be treated as an error condition.
if i % 4 < 2 {
// Check for case #2.
let bad_padding_index = start_of_leftovers
+ if padding_bytes > 0 {
// If we've already seen padding, report the first padding index.
// This is to be consistent with the normal decode logic: it will report an
// error on the first padding character (since it doesn't expect to see
// anything but actual encoded data).
// This could only happen if the padding started in the previous quad since
// otherwise this case would have been hit at i % 4 == 0 if it was the same
// quad.
first_padding_index
} else {
// haven't seen padding before, just use where we are now
i
};
return Err(DecodeError::InvalidByte(bad_padding_index, b));
}
if padding_bytes == 0 {
first_padding_index = i;
}
padding_bytes += 1;
continue;
}
// Check for case #1.
// To make '=' handling consistent with the main loop, don't allow
// non-suffix '=' in trailing chunk either. Report error as first
// erroneous padding.
if padding_bytes > 0 {
return Err(DecodeError::InvalidByte(
start_of_leftovers + first_padding_index,
PAD_BYTE,
));
}
last_symbol = b;
// can use up to 8 * 6 = 48 bits of the u64, if last chunk has no padding.
// Pack the leftovers from left to right.
let shift = 64 - (morsels_in_leftover + 1) * 6;
let morsel = decode_table[b as usize];
if morsel == INVALID_VALUE {
return Err(DecodeError::InvalidByte(start_of_leftovers + i, b));
}
leftover_bits |= (morsel as u64) << shift;
morsels_in_leftover += 1;
}
match padding_mode {
DecodePaddingMode::Indifferent => { /* everything we care about was already checked */ }
DecodePaddingMode::RequireCanonical => {
if (padding_bytes + morsels_in_leftover) % 4 != 0 {
return Err(DecodeError::InvalidPadding);
}
}
DecodePaddingMode::RequireNone => {
if padding_bytes > 0 {
// check at the end to make sure we let the cases of padding that should be InvalidByte
// get hit
return Err(DecodeError::InvalidPadding);
}
}
}
// When encoding 1 trailing byte (e.g. 0xFF), 2 base64 bytes ("/w") are needed.
// / is the symbol for 63 (0x3F, bottom 6 bits all set) and w is 48 (0x30, top 2 bits
// of bottom 6 bits set).
// When decoding two symbols back to one trailing byte, any final symbol higher than
// w would still decode to the original byte because we only care about the top two
// bits in the bottom 6, but would be a non-canonical encoding. So, we calculate a
// mask based on how many bits are used for just the canonical encoding, and optionally
// error if any other bits are set. In the example of one encoded byte -> 2 symbols,
// 2 symbols can technically encode 12 bits, but the last 4 are non canonical, and
// useless since there are no more symbols to provide the necessary 4 additional bits
// to finish the second original byte.
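// Worked example (sketch): with morsels_in_leftover == 2, the two 6-bit morsels occupy
// bits 63..52 of `leftover_bits`; only the top 8 of those 12 bits form the single decoded
// byte, so `mask` below is `!0 >> 8` and any set bit among the remaining 4 means the last
// symbol carried non-canonical trailing bits.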
let leftover_bits_ready_to_append = match morsels_in_leftover {
0 => 0,
2 => 8,
3 => 16,
4 => 24,
6 => 32,
7 => 40,
8 => 48,
// can also be detected as case #2 bad padding above
_ => unreachable!(
"Impossible: must only have 0 to 8 input bytes in last chunk, with no invalid lengths"
),
};
// if there are bits set outside the bits we care about, last symbol encodes trailing bits that
// will not be included in the output
let mask = !0 >> leftover_bits_ready_to_append;
if !decode_allow_trailing_bits && (leftover_bits & mask) != 0 {
// last morsel is at `morsels_in_leftover` - 1
return Err(DecodeError::InvalidLastSymbol(
start_of_leftovers + morsels_in_leftover - 1,
last_symbol,
));
}
// TODO benchmark simply converting to big endian bytes
let mut leftover_bits_appended_to_buf = 0;
while leftover_bits_appended_to_buf < leftover_bits_ready_to_append {
// `as` simply truncates the higher bits, which is what we want here
let selected_bits = (leftover_bits >> (56 - leftover_bits_appended_to_buf)) as u8;
output[output_index] = selected_bits;
output_index += 1;
leftover_bits_appended_to_buf += 8;
}
Ok(DecodeMetadata::new(
output_index,
if padding_bytes > 0 {
Some(input_index + first_padding_index)
} else {
None
},
))
}
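// A minimal sketch of the happy path, assuming the standard engine's decode table:
// "Zg==" is the canonical encoding of "f", so the suffix decoder should emit one byte and
// report the offset of the first padding byte.
#[cfg(test)]
mod tests {
use super::*;
use crate::engine::general_purpose::STANDARD;
#[test]
fn decode_suffix_reports_decoded_len_and_padding_offset() {
let mut output = [0_u8; 3];
let metadata = decode_suffix(
&b"Zg=="[..],
0,
&mut output,
0,
&STANDARD.decode_table,
false,
DecodePaddingMode::RequireCanonical,
)
.unwrap();
assert_eq!(1, metadata.decoded_len);
assert_eq!(Some(2), metadata.padding_offset);
assert_eq!(b'f', output[0]);
}
}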

@ -1,352 +0,0 @@
//! Provides the [GeneralPurpose] engine and associated config types.
use crate::{
alphabet,
alphabet::Alphabet,
engine::{Config, DecodeMetadata, DecodePaddingMode},
DecodeError,
};
use core::convert::TryInto;
mod decode;
pub(crate) mod decode_suffix;
pub use decode::GeneralPurposeEstimate;
pub(crate) const INVALID_VALUE: u8 = 255;
/// A general-purpose base64 engine.
///
/// - It uses no vector CPU instructions, so it will work on any system.
/// - It is reasonably fast (~2-3GiB/s).
/// - It is not constant-time, though, so it is vulnerable to timing side-channel attacks. For loading cryptographic keys, etc., it is suggested to use the forthcoming constant-time implementation.
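///
/// A minimal usage sketch (the engine built here matches the prebuilt [STANDARD] constant):
///
/// ```
/// use base64::{alphabet, engine::{self, general_purpose}, Engine as _};
///
/// let engine = engine::GeneralPurpose::new(&alphabet::STANDARD, general_purpose::PAD);
/// assert_eq!("Zm9v", engine.encode(b"foo"));
/// ```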
#[derive(Debug, Clone)]
pub struct GeneralPurpose {
encode_table: [u8; 64],
decode_table: [u8; 256],
config: GeneralPurposeConfig,
}
impl GeneralPurpose {
/// Create a `GeneralPurpose` engine from an [Alphabet].
///
/// While not very expensive to initialize, ideally these should be cached
/// if the engine will be used repeatedly.
pub const fn new(alphabet: &Alphabet, config: GeneralPurposeConfig) -> Self {
Self {
encode_table: encode_table(alphabet),
decode_table: decode_table(alphabet),
config,
}
}
}
impl super::Engine for GeneralPurpose {
type Config = GeneralPurposeConfig;
type DecodeEstimate = GeneralPurposeEstimate;
fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize {
let mut input_index: usize = 0;
const BLOCKS_PER_FAST_LOOP: usize = 4;
const LOW_SIX_BITS: u64 = 0x3F;
// we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need
// 2 trailing bytes to be available to read.
let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2);
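// For example (sketch), with a 100 byte input: last_fast_index = 100 - 26 = 74, so the
// unrolled loop below runs for input_index = 0, 24, 48 and 72 (reading 26 bytes each time,
// advancing by 24) and leaves the final 4 bytes to the remainder handling further down.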
let mut output_index = 0;
if last_fast_index > 0 {
while input_index <= last_fast_index {
// Major performance wins from letting the optimizer do the bounds check once, mostly
// on the output side
let input_chunk =
&input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))];
let output_chunk =
&mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)];
// Hand-unrolling for 32 vs 16 or 8 bytes yields performance about equivalent
// to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for
// large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect
// that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte
// SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once.
// Plus, single-digit percentage performance differences might well be quite different
// on different hardware.
let input_u64 = read_u64(&input_chunk[0..]);
output_chunk[0] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
output_chunk[1] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
output_chunk[2] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
output_chunk[3] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
output_chunk[4] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
output_chunk[5] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
output_chunk[6] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
output_chunk[7] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
let input_u64 = read_u64(&input_chunk[6..]);
output_chunk[8] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
output_chunk[9] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
output_chunk[10] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
output_chunk[11] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
output_chunk[12] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
output_chunk[13] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
output_chunk[14] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
output_chunk[15] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
let input_u64 = read_u64(&input_chunk[12..]);
output_chunk[16] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
output_chunk[17] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
output_chunk[18] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
output_chunk[19] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
output_chunk[20] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
output_chunk[21] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
output_chunk[22] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
output_chunk[23] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
let input_u64 = read_u64(&input_chunk[18..]);
output_chunk[24] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
output_chunk[25] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
output_chunk[26] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
output_chunk[27] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
output_chunk[28] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
output_chunk[29] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
output_chunk[30] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
output_chunk[31] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
output_index += BLOCKS_PER_FAST_LOOP * 8;
input_index += BLOCKS_PER_FAST_LOOP * 6;
}
}
// Encode what's left after the fast loop.
const LOW_SIX_BITS_U8: u8 = 0x3F;
let rem = input.len() % 3;
let start_of_rem = input.len() - rem;
// start at the first index not handled by fast loop, which may be 0.
while input_index < start_of_rem {
let input_chunk = &input[input_index..(input_index + 3)];
let output_chunk = &mut output[output_index..(output_index + 4)];
output_chunk[0] = self.encode_table[(input_chunk[0] >> 2) as usize];
output_chunk[1] = self.encode_table
[((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize];
output_chunk[2] = self.encode_table
[((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize];
output_chunk[3] = self.encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize];
input_index += 3;
output_index += 4;
}
if rem == 2 {
output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize];
output[output_index + 1] =
self.encode_table[((input[start_of_rem] << 4 | input[start_of_rem + 1] >> 4)
& LOW_SIX_BITS_U8) as usize];
output[output_index + 2] =
self.encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize];
output_index += 3;
} else if rem == 1 {
output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize];
output[output_index + 1] =
self.encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize];
output_index += 2;
}
output_index
}
fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate {
GeneralPurposeEstimate::new(input_len)
}
fn internal_decode(
&self,
input: &[u8],
output: &mut [u8],
estimate: Self::DecodeEstimate,
) -> Result<DecodeMetadata, DecodeError> {
decode::decode_helper(
input,
estimate,
output,
&self.decode_table,
self.config.decode_allow_trailing_bits,
self.config.decode_padding_mode,
)
}
fn config(&self) -> &Self::Config {
&self.config
}
}
/// Returns a table mapping a 6-bit index to the ASCII byte encoding of the index
pub(crate) const fn encode_table(alphabet: &Alphabet) -> [u8; 64] {
// the encode table is just the alphabet:
// 6-bit index lookup -> printable byte
let mut encode_table = [0_u8; 64];
{
let mut index = 0;
while index < 64 {
encode_table[index] = alphabet.symbols[index];
index += 1;
}
}
encode_table
}
/// Returns a table mapping base64 bytes as the lookup index to either:
/// - [INVALID_VALUE] for bytes that aren't members of the alphabet
/// - a byte whose lower 6 bits are the value that was encoded into the index byte
pub(crate) const fn decode_table(alphabet: &Alphabet) -> [u8; 256] {
let mut decode_table = [INVALID_VALUE; 256];
// Since the table is full of `INVALID_VALUE` already, we only need to overwrite
// the parts that are valid.
let mut index = 0;
while index < 64 {
// The index in the alphabet is the 6-bit value we care about.
// Since the index is in 0-63, it is safe to cast to u8.
decode_table[alphabet.symbols[index] as usize] = index as u8;
index += 1;
}
decode_table
}
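// For the standard alphabet, for example, decode_table[b'A' as usize] == 0,
// decode_table[b'/' as usize] == 63, and bytes outside the alphabet (including the b'='
// pad byte, which is handled separately) map to INVALID_VALUE.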
#[inline]
fn read_u64(s: &[u8]) -> u64 {
u64::from_be_bytes(s[..8].try_into().unwrap())
}
/// Contains configuration parameters for base64 encoding and decoding.
///
/// ```
/// # use base64::engine::GeneralPurposeConfig;
/// let config = GeneralPurposeConfig::new()
/// .with_encode_padding(false);
/// // further customize using `.with_*` methods as needed
/// ```
///
/// The constants [PAD] and [NO_PAD] cover most use cases.
///
/// To specify the characters used, see [Alphabet].
#[derive(Clone, Copy, Debug)]
pub struct GeneralPurposeConfig {
encode_padding: bool,
decode_allow_trailing_bits: bool,
decode_padding_mode: DecodePaddingMode,
}
impl GeneralPurposeConfig {
/// Create a new config with `padding` = `true`, `decode_allow_trailing_bits` = `false`, and
/// `decode_padding_mode = DecodePaddingMode::RequireCanonical`.
///
/// This probably matches most people's expectations, but consider disabling padding to save
/// a few bytes unless you specifically need it for compatibility with some legacy system.
pub const fn new() -> Self {
Self {
// RFC states that padding must be applied by default
encode_padding: true,
decode_allow_trailing_bits: false,
decode_padding_mode: DecodePaddingMode::RequireCanonical,
}
}
/// Create a new config based on `self` with an updated `padding` setting.
///
/// If `padding` is `true`, encoding will append either 1 or 2 `=` padding characters as needed
/// to produce an output whose length is a multiple of 4.
///
/// Padding is not needed for correct decoding and only serves to waste bytes, but it's in the
/// [spec](https://datatracker.ietf.org/doc/html/rfc4648#section-3.2).
///
/// For new applications, consider not using padding if the decoders you're using don't require
/// padding to be present.
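///
/// A minimal sketch (standard alphabet assumed):
///
/// ```
/// use base64::{alphabet, engine::{GeneralPurpose, GeneralPurposeConfig}, Engine as _};
///
/// let config = GeneralPurposeConfig::new().with_encode_padding(false);
/// let engine = GeneralPurpose::new(&alphabet::STANDARD, config);
/// assert_eq!("Zg", engine.encode(b"f"));
/// ```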
pub const fn with_encode_padding(self, padding: bool) -> Self {
Self {
encode_padding: padding,
..self
}
}
/// Create a new config based on `self` with an updated `decode_allow_trailing_bits` setting.
///
/// Most users will not need to configure this. It's useful if you need to decode base64
/// produced by a buggy encoder that has bits set in the unused space on the last base64
/// character as per [forgiving-base64 decode](https://infra.spec.whatwg.org/#forgiving-base64-decode).
/// If invalid trailing bits are present and this is `true`, those bits will
/// be silently ignored, else `DecodeError::InvalidLastSymbol` will be emitted.
pub const fn with_decode_allow_trailing_bits(self, allow: bool) -> Self {
Self {
decode_allow_trailing_bits: allow,
..self
}
}
/// Create a new config based on `self` with an updated `decode_padding_mode` setting.
///
/// Padding is not useful in terms of representing encoded data -- it makes no difference to
/// the decoder if padding is present or not, so if you have some un-padded input to decode, it
/// is perfectly fine to use `DecodePaddingMode::Indifferent` to prevent errors from being
/// emitted.
///
/// However, since in practice
/// [people who learned nothing from BER vs DER seem to expect base64 to have one canonical encoding](https://eprint.iacr.org/2022/361),
/// the default setting is the stricter `DecodePaddingMode::RequireCanonical`.
///
/// Or, if "canonical" in your circumstance means _no_ padding rather than padding to the
/// next multiple of four, there's `DecodePaddingMode::RequireNone`.
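///
/// A minimal sketch (standard alphabet assumed): an `Indifferent` engine accepts both the
/// padded and the unpadded form of the same data.
///
/// ```
/// use base64::{alphabet, engine::{DecodePaddingMode, GeneralPurpose, GeneralPurposeConfig}, Engine as _};
///
/// let config = GeneralPurposeConfig::new()
/// .with_decode_padding_mode(DecodePaddingMode::Indifferent);
/// let engine = GeneralPurpose::new(&alphabet::STANDARD, config);
/// assert_eq!(b"f".to_vec(), engine.decode("Zg").unwrap());
/// assert_eq!(b"f".to_vec(), engine.decode("Zg==").unwrap());
/// ```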
pub const fn with_decode_padding_mode(self, mode: DecodePaddingMode) -> Self {
Self {
decode_padding_mode: mode,
..self
}
}
}
impl Default for GeneralPurposeConfig {
/// Delegates to [GeneralPurposeConfig::new].
fn default() -> Self {
Self::new()
}
}
impl Config for GeneralPurposeConfig {
fn encode_padding(&self) -> bool {
self.encode_padding
}
}
/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [PAD] config.
pub const STANDARD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, PAD);
/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [NO_PAD] config.
pub const STANDARD_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [PAD] config.
pub const URL_SAFE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, PAD);
/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [NO_PAD] config.
pub const URL_SAFE_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD);
/// Include padding bytes when encoding, and require that they be present when decoding.
///
/// This is the standard per the base64 RFC, but consider using [NO_PAD] instead as padding serves
/// little purpose in practice.
pub const PAD: GeneralPurposeConfig = GeneralPurposeConfig::new();
/// Don't add padding when encoding, and require no padding when decoding.
pub const NO_PAD: GeneralPurposeConfig = GeneralPurposeConfig::new()
.with_encode_padding(false)
.with_decode_padding_mode(DecodePaddingMode::RequireNone);

@ -1,466 +0,0 @@
//! Provides the [Engine] abstraction and out of the box implementations.
#[cfg(any(feature = "alloc", test))]
use crate::chunked_encoder;
use crate::{
encode::{encode_with_padding, EncodeSliceError},
encoded_len, DecodeError, DecodeSliceError,
};
#[cfg(any(feature = "alloc", test))]
use alloc::vec::Vec;
#[cfg(any(feature = "alloc", test))]
use alloc::{string::String, vec};
pub mod general_purpose;
#[cfg(test)]
mod naive;
#[cfg(test)]
mod tests;
pub use general_purpose::{GeneralPurpose, GeneralPurposeConfig};
/// An `Engine` provides low-level encoding and decoding operations that all other higher-level parts of the API use. Users of the library will generally not need to implement this.
///
/// Different implementations offer different characteristics. The library currently ships with
/// [GeneralPurpose] that offers good speed and works on any CPU, with more choices
/// coming later, like a constant-time one when side channel resistance is called for, and vendor-specific vectorized ones for more speed.
///
/// See [general_purpose::STANDARD_NO_PAD] if you just want standard base64. Otherwise, when possible, it's
/// recommended to store the engine in a `const` so that references to it won't pose any lifetime
/// issues, and to avoid repeating the cost of engine setup.
///
/// Since almost nobody will need to implement `Engine`, docs for internal methods are hidden.
// When adding an implementation of Engine, include them in the engine test suite:
// - add an implementation of [engine::tests::EngineWrapper]
// - add the implementation to the `all_engines` macro
// All tests run on all engines listed in the macro.
pub trait Engine: Send + Sync {
/// The config type used by this engine
type Config: Config;
/// The decode estimate used by this engine
type DecodeEstimate: DecodeEstimate;
/// This is not meant to be called directly; it is only for `Engine` implementors.
/// See the other `encode*` functions on this trait.
///
/// Encode the `input` bytes into the `output` buffer based on the mapping in `encode_table`.
///
/// `output` will be long enough to hold the encoded data.
///
/// Returns the number of bytes written.
///
/// No padding should be written; that is handled separately.
///
/// Must not write any bytes into the output slice other than the encoded data.
#[doc(hidden)]
fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize;
/// This is not meant to be called directly; it is only for `Engine` implementors.
///
/// As an optimization to prevent the decoded length from being calculated twice, it is
/// sometimes helpful to have a conservative estimate of the decoded size before doing the
/// decoding, so this calculation is done separately and passed to [Engine::decode()] as needed.
#[doc(hidden)]
fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate;
/// This is not meant to be called directly; it is only for `Engine` implementors.
/// See the other `decode*` functions on this trait.
///
/// Decode `input` base64 bytes into the `output` buffer.
///
/// `decode_estimate` is the result of [Engine::internal_decoded_len_estimate()], which is passed in to avoid
/// calculating it again (expensive on short inputs).
///
/// Each complete 4-byte chunk of encoded data decodes to 3 bytes of decoded data, but this
/// function must also handle the final possibly partial chunk.
/// If the input length is not a multiple of 4, or uses padding bytes to reach a multiple of 4,
/// the trailing 2 or 3 bytes must decode to 1 or 2 bytes, respectively, as per the
/// [RFC](https://tools.ietf.org/html/rfc4648#section-3.5).
///
/// Decoding must not write any bytes into the output slice other than the decoded data.
///
/// Non-canonical trailing bits in the final tokens or non-canonical padding must be reported as
/// errors unless the engine is configured otherwise.
///
/// # Panics
///
/// Panics if `output` is too small.
#[doc(hidden)]
fn internal_decode(
&self,
input: &[u8],
output: &mut [u8],
decode_estimate: Self::DecodeEstimate,
) -> Result<DecodeMetadata, DecodeError>;
/// Returns the config for this engine.
fn config(&self) -> &Self::Config;
/// Encode arbitrary octets as base64 using the provided `Engine`.
/// Returns a `String`.
///
/// # Example
///
/// ```rust
/// use base64::{Engine as _, engine::{self, general_purpose}, alphabet};
///
/// let b64 = general_purpose::STANDARD.encode(b"hello world~");
/// println!("{}", b64);
///
/// const CUSTOM_ENGINE: engine::GeneralPurpose =
/// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD);
///
/// let b64_url = CUSTOM_ENGINE.encode(b"hello internet~");
/// ```
#[cfg(any(feature = "alloc", test))]
#[inline]
fn encode<T: AsRef<[u8]>>(&self, input: T) -> String {
fn inner<E>(engine: &E, input_bytes: &[u8]) -> String
where
E: Engine + ?Sized,
{
let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding())
.expect("integer overflow when calculating buffer size");
let mut buf = vec![0; encoded_size];
encode_with_padding(input_bytes, &mut buf[..], engine, encoded_size);
String::from_utf8(buf).expect("Invalid UTF8")
}
inner(self, input.as_ref())
}
/// Encode arbitrary octets as base64 into a supplied `String`.
/// Writes into the supplied `String`, which may allocate if its internal buffer isn't big enough.
///
/// # Example
///
/// ```rust
/// use base64::{Engine as _, engine::{self, general_purpose}, alphabet};
/// const CUSTOM_ENGINE: engine::GeneralPurpose =
/// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD);
///
/// fn main() {
/// let mut buf = String::new();
/// general_purpose::STANDARD.encode_string(b"hello world~", &mut buf);
/// println!("{}", buf);
///
/// buf.clear();
/// CUSTOM_ENGINE.encode_string(b"hello internet~", &mut buf);
/// println!("{}", buf);
/// }
/// ```
#[cfg(any(feature = "alloc", test))]
#[inline]
fn encode_string<T: AsRef<[u8]>>(&self, input: T, output_buf: &mut String) {
fn inner<E>(engine: &E, input_bytes: &[u8], output_buf: &mut String)
where
E: Engine + ?Sized,
{
let mut sink = chunked_encoder::StringSink::new(output_buf);
chunked_encoder::ChunkedEncoder::new(engine)
.encode(input_bytes, &mut sink)
.expect("Writing to a String shouldn't fail");
}
inner(self, input.as_ref(), output_buf)
}
/// Encode arbitrary octets as base64 into a supplied slice.
/// Writes into the supplied output buffer.
///
/// This is useful if you wish to avoid allocation entirely (e.g. encoding into a stack-resident
/// or statically-allocated buffer).
///
/// # Example
///
#[cfg_attr(feature = "alloc", doc = "```")]
#[cfg_attr(not(feature = "alloc"), doc = "```ignore")]
/// use base64::{Engine as _, engine::general_purpose};
/// let s = b"hello internet!";
/// let mut buf = Vec::new();
/// // make sure we'll have a slice big enough for base64 + padding
/// buf.resize(s.len() * 4 / 3 + 4, 0);
///
/// let bytes_written = general_purpose::STANDARD.encode_slice(s, &mut buf).unwrap();
///
/// // shorten our vec down to just what was written
/// buf.truncate(bytes_written);
///
/// assert_eq!(s, general_purpose::STANDARD.decode(&buf).unwrap().as_slice());
/// ```
#[inline]
fn encode_slice<T: AsRef<[u8]>>(
&self,
input: T,
output_buf: &mut [u8],
) -> Result<usize, EncodeSliceError> {
fn inner<E>(
engine: &E,
input_bytes: &[u8],
output_buf: &mut [u8],
) -> Result<usize, EncodeSliceError>
where
E: Engine + ?Sized,
{
let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding())
.expect("usize overflow when calculating buffer size");
if output_buf.len() < encoded_size {
return Err(EncodeSliceError::OutputSliceTooSmall);
}
let b64_output = &mut output_buf[0..encoded_size];
encode_with_padding(input_bytes, b64_output, engine, encoded_size);
Ok(encoded_size)
}
inner(self, input.as_ref(), output_buf)
}
/// Decode the input into a new `Vec`.
///
/// # Example
///
/// ```rust
/// use base64::{Engine as _, alphabet, engine::{self, general_purpose}};
///
/// let bytes = general_purpose::STANDARD
/// .decode("aGVsbG8gd29ybGR+Cg==").unwrap();
/// println!("{:?}", bytes);
///
/// // custom engine setup
/// let bytes_url = engine::GeneralPurpose::new(
/// &alphabet::URL_SAFE,
/// general_purpose::NO_PAD)
/// .decode("aGVsbG8gaW50ZXJuZXR-Cg").unwrap();
/// println!("{:?}", bytes_url);
/// ```
#[cfg(any(feature = "alloc", test))]
#[inline]
fn decode<T: AsRef<[u8]>>(&self, input: T) -> Result<Vec<u8>, DecodeError> {
fn inner<E>(engine: &E, input_bytes: &[u8]) -> Result<Vec<u8>, DecodeError>
where
E: Engine + ?Sized,
{
let estimate = engine.internal_decoded_len_estimate(input_bytes.len());
let mut buffer = vec![0; estimate.decoded_len_estimate()];
let bytes_written = engine
.internal_decode(input_bytes, &mut buffer, estimate)?
.decoded_len;
buffer.truncate(bytes_written);
Ok(buffer)
}
inner(self, input.as_ref())
}
/// Decode the `input` into the supplied `buffer`.
///
/// Writes into the supplied `Vec`, which may allocate if its internal buffer isn't big enough.
/// Returns a `Result` containing an empty tuple, aka `()`.
///
/// # Example
///
/// ```rust
/// use base64::{Engine as _, alphabet, engine::{self, general_purpose}};
/// const CUSTOM_ENGINE: engine::GeneralPurpose =
/// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::PAD);
///
/// fn main() {
/// use base64::Engine;
/// let mut buffer = Vec::<u8>::new();
/// // with the default engine
/// general_purpose::STANDARD
/// .decode_vec("aGVsbG8gd29ybGR+Cg==", &mut buffer,).unwrap();
/// println!("{:?}", buffer);
///
/// buffer.clear();
///
/// // with a custom engine
/// CUSTOM_ENGINE.decode_vec(
/// "aGVsbG8gaW50ZXJuZXR-Cg==",
/// &mut buffer,
/// ).unwrap();
/// println!("{:?}", buffer);
/// }
/// ```
#[cfg(any(feature = "alloc", test))]
#[inline]
fn decode_vec<T: AsRef<[u8]>>(
&self,
input: T,
buffer: &mut Vec<u8>,
) -> Result<(), DecodeError> {
fn inner<E>(engine: &E, input_bytes: &[u8], buffer: &mut Vec<u8>) -> Result<(), DecodeError>
where
E: Engine + ?Sized,
{
let starting_output_len = buffer.len();
let estimate = engine.internal_decoded_len_estimate(input_bytes.len());
let total_len_estimate = estimate
.decoded_len_estimate()
.checked_add(starting_output_len)
.expect("Overflow when calculating output buffer length");
buffer.resize(total_len_estimate, 0);
let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..];
let bytes_written = engine
.internal_decode(input_bytes, buffer_slice, estimate)?
.decoded_len;
buffer.truncate(starting_output_len + bytes_written);
Ok(())
}
inner(self, input.as_ref(), buffer)
}
/// Decode the input into the provided output slice.
///
/// Returns the number of bytes written to the slice, or an error if `output` is smaller than
/// the estimated decoded length.
///
/// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
///
/// See [crate::decoded_len_estimate] for calculating buffer sizes.
///
/// See [Engine::decode_slice_unchecked] for a version that panics instead of returning an error
/// if the output buffer is too small.
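///
/// # Example
///
/// A minimal sketch decoding into a stack buffer (standard engine assumed):
///
/// ```
/// use base64::{engine::general_purpose::STANDARD, Engine as _};
///
/// let mut buf = [0_u8; 8];
/// let len = STANDARD.decode_slice(b"aGVsbG8=", &mut buf).unwrap();
/// assert_eq!(b"hello", &buf[..len]);
/// ```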
#[inline]
fn decode_slice<T: AsRef<[u8]>>(
&self,
input: T,
output: &mut [u8],
) -> Result<usize, DecodeSliceError> {
fn inner<E>(
engine: &E,
input_bytes: &[u8],
output: &mut [u8],
) -> Result<usize, DecodeSliceError>
where
E: Engine + ?Sized,
{
let estimate = engine.internal_decoded_len_estimate(input_bytes.len());
if output.len() < estimate.decoded_len_estimate() {
return Err(DecodeSliceError::OutputSliceTooSmall);
}
engine
.internal_decode(input_bytes, output, estimate)
.map_err(|e| e.into())
.map(|dm| dm.decoded_len)
}
inner(self, input.as_ref(), output)
}
/// Decode the input into the provided output slice.
///
/// Returns the number of bytes written to the slice.
///
/// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
///
/// See [crate::decoded_len_estimate] for calculating buffer sizes.
///
/// See [Engine::decode_slice] for a version that returns an error instead of panicking if the output
/// buffer is too small.
///
/// # Panics
///
/// Panics if the provided output buffer is too small for the decoded data.
#[inline]
fn decode_slice_unchecked<T: AsRef<[u8]>>(
&self,
input: T,
output: &mut [u8],
) -> Result<usize, DecodeError> {
fn inner<E>(engine: &E, input_bytes: &[u8], output: &mut [u8]) -> Result<usize, DecodeError>
where
E: Engine + ?Sized,
{
engine
.internal_decode(
input_bytes,
output,
engine.internal_decoded_len_estimate(input_bytes.len()),
)
.map(|dm| dm.decoded_len)
}
inner(self, input.as_ref(), output)
}
}
/// The minimal level of configuration that engines must support.
pub trait Config {
/// Returns `true` if padding should be added after the encoded output.
///
/// Padding is added outside the engine's encode() since the engine may be used
/// to encode only a chunk of the overall output, so it can't always know when
/// the output is "done" and would therefore need padding (if configured).
// It could be provided as a separate parameter when encoding, but that feels like
// leaking an implementation detail to the user, and it's hopefully more convenient
// to have to only pass one thing (the engine) to any part of the API.
fn encode_padding(&self) -> bool;
}
/// The decode estimate used by an engine implementation. Users do not need to interact with this;
/// it is only for engine implementors.
///
/// Implementors may store relevant data here when constructing this to avoid having to calculate
/// them again during actual decoding.
pub trait DecodeEstimate {
/// Returns a conservative (err on the side of too big) estimate of the decoded length to use
/// for pre-allocating buffers, etc.
///
/// The estimate must be no larger than the next largest complete triple of decoded bytes.
/// That is, the final quad of tokens to decode may be assumed to be complete with no padding.
fn decoded_len_estimate(&self) -> usize;
}
/// Controls how pad bytes are handled when decoding.
///
/// Each [Engine] must support at least the behavior indicated by
/// [DecodePaddingMode::RequireCanonical], and may support other modes.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DecodePaddingMode {
/// Canonical padding is allowed, but any fewer padding bytes than that is also allowed.
Indifferent,
/// Padding must be canonical (0, 1, or 2 `=` as needed to produce a 4 byte suffix).
RequireCanonical,
/// Padding must be absent -- for when you want predictable padding, without any wasted bytes.
RequireNone,
}
/// Metadata about the result of a decode operation
#[derive(PartialEq, Eq, Debug)]
pub struct DecodeMetadata {
/// Number of decoded bytes output
pub(crate) decoded_len: usize,
/// Offset of the first padding byte in the input, if any
pub(crate) padding_offset: Option<usize>,
}
impl DecodeMetadata {
pub(crate) fn new(decoded_bytes: usize, padding_index: Option<usize>) -> Self {
Self {
decoded_len: decoded_bytes,
padding_offset: padding_index,
}
}
}

@ -1,218 +0,0 @@
use crate::{
alphabet::Alphabet,
engine::{
general_purpose::{self, decode_table, encode_table},
Config, DecodeEstimate, DecodeMetadata, DecodePaddingMode, Engine,
},
DecodeError, PAD_BYTE,
};
use std::ops::{BitAnd, BitOr, Shl, Shr};
/// Comparatively simple implementation that can be used as something to compare against in tests
pub struct Naive {
encode_table: [u8; 64],
decode_table: [u8; 256],
config: NaiveConfig,
}
impl Naive {
const ENCODE_INPUT_CHUNK_SIZE: usize = 3;
const DECODE_INPUT_CHUNK_SIZE: usize = 4;
pub const fn new(alphabet: &Alphabet, config: NaiveConfig) -> Self {
Self {
encode_table: encode_table(alphabet),
decode_table: decode_table(alphabet),
config,
}
}
fn decode_byte_into_u32(&self, offset: usize, byte: u8) -> Result<u32, DecodeError> {
let decoded = self.decode_table[byte as usize];
if decoded == general_purpose::INVALID_VALUE {
return Err(DecodeError::InvalidByte(offset, byte));
}
Ok(decoded as u32)
}
}
impl Engine for Naive {
type Config = NaiveConfig;
type DecodeEstimate = NaiveEstimate;
fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize {
// complete chunks first
const LOW_SIX_BITS: u32 = 0x3F;
let rem = input.len() % Self::ENCODE_INPUT_CHUNK_SIZE;
// will never underflow
let complete_chunk_len = input.len() - rem;
let mut input_index = 0_usize;
let mut output_index = 0_usize;
if let Some(last_complete_chunk_index) =
complete_chunk_len.checked_sub(Self::ENCODE_INPUT_CHUNK_SIZE)
{
while input_index <= last_complete_chunk_index {
let chunk = &input[input_index..input_index + Self::ENCODE_INPUT_CHUNK_SIZE];
// populate low 24 bits from 3 bytes
let chunk_int: u32 =
(chunk[0] as u32).shl(16) | (chunk[1] as u32).shl(8) | (chunk[2] as u32);
// encode 4x 6-bit output bytes
output[output_index] = self.encode_table[chunk_int.shr(18) as usize];
output[output_index + 1] =
self.encode_table[chunk_int.shr(12_u8).bitand(LOW_SIX_BITS) as usize];
output[output_index + 2] =
self.encode_table[chunk_int.shr(6_u8).bitand(LOW_SIX_BITS) as usize];
output[output_index + 3] =
self.encode_table[chunk_int.bitand(LOW_SIX_BITS) as usize];
input_index += Self::ENCODE_INPUT_CHUNK_SIZE;
output_index += 4;
}
}
// then leftovers
if rem == 2 {
let chunk = &input[input_index..input_index + 2];
// high six bits of chunk[0]
output[output_index] = self.encode_table[chunk[0].shr(2) as usize];
// bottom 2 bits of [0], high 4 bits of [1]
output[output_index + 1] =
self.encode_table[(chunk[0].shl(4_u8).bitor(chunk[1].shr(4_u8)) as u32)
.bitand(LOW_SIX_BITS) as usize];
// bottom 4 bits of [1], with the 2 bottom bits as zero
output[output_index + 2] =
self.encode_table[(chunk[1].shl(2_u8) as u32).bitand(LOW_SIX_BITS) as usize];
output_index += 3;
} else if rem == 1 {
let byte = input[input_index];
output[output_index] = self.encode_table[byte.shr(2) as usize];
output[output_index + 1] =
self.encode_table[(byte.shl(4_u8) as u32).bitand(LOW_SIX_BITS) as usize];
output_index += 2;
}
output_index
}
fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate {
NaiveEstimate::new(input_len)
}
fn internal_decode(
&self,
input: &[u8],
output: &mut [u8],
estimate: Self::DecodeEstimate,
) -> Result<DecodeMetadata, DecodeError> {
if estimate.rem == 1 {
// trailing whitespace is so common that it's worth it to check the last byte to
// possibly return a better error message
if let Some(b) = input.last() {
if *b != PAD_BYTE
&& self.decode_table[*b as usize] == general_purpose::INVALID_VALUE
{
return Err(DecodeError::InvalidByte(input.len() - 1, *b));
}
}
return Err(DecodeError::InvalidLength);
}
let mut input_index = 0_usize;
let mut output_index = 0_usize;
const BOTTOM_BYTE: u32 = 0xFF;
// can only use the main loop on non-trailing chunks
if input.len() > Self::DECODE_INPUT_CHUNK_SIZE {
// skip the last chunk, whether it's partial or full, since it might
// have padding, and start at the beginning of the chunk before that
let last_complete_chunk_start_index = estimate.complete_chunk_len
- if estimate.rem == 0 {
// Trailing chunk is also full chunk, so there must be at least 2 chunks, and
// this won't underflow
Self::DECODE_INPUT_CHUNK_SIZE * 2
} else {
// Trailing chunk is partial, so it's already excluded in
// complete_chunk_len
Self::DECODE_INPUT_CHUNK_SIZE
};
while input_index <= last_complete_chunk_start_index {
let chunk = &input[input_index..input_index + Self::DECODE_INPUT_CHUNK_SIZE];
let decoded_int: u32 = self.decode_byte_into_u32(input_index, chunk[0])?.shl(18)
| self
.decode_byte_into_u32(input_index + 1, chunk[1])?
.shl(12)
| self.decode_byte_into_u32(input_index + 2, chunk[2])?.shl(6)
| self.decode_byte_into_u32(input_index + 3, chunk[3])?;
output[output_index] = decoded_int.shr(16_u8).bitand(BOTTOM_BYTE) as u8;
output[output_index + 1] = decoded_int.shr(8_u8).bitand(BOTTOM_BYTE) as u8;
output[output_index + 2] = decoded_int.bitand(BOTTOM_BYTE) as u8;
input_index += Self::DECODE_INPUT_CHUNK_SIZE;
output_index += 3;
}
}
general_purpose::decode_suffix::decode_suffix(
input,
input_index,
output,
output_index,
&self.decode_table,
self.config.decode_allow_trailing_bits,
self.config.decode_padding_mode,
)
}
fn config(&self) -> &Self::Config {
&self.config
}
}
pub struct NaiveEstimate {
/// remainder from dividing input by `Naive::DECODE_INPUT_CHUNK_SIZE`
rem: usize,
/// Length of input that is in complete `Naive::DECODE_INPUT_CHUNK_SIZE`-length chunks
complete_chunk_len: usize,
}
impl NaiveEstimate {
fn new(input_len: usize) -> Self {
let rem = input_len % Naive::DECODE_INPUT_CHUNK_SIZE;
let complete_chunk_len = input_len - rem;
Self {
rem,
complete_chunk_len,
}
}
}
impl DecodeEstimate for NaiveEstimate {
fn decoded_len_estimate(&self) -> usize {
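// e.g. (sketch) input_len == 6 gives complete_chunk_len == 4 and rem == 2, so the
// estimate is (1 + 1) * 3 == 6, a conservative bound on the 4 bytes actually decoded.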
((self.complete_chunk_len / 4) + ((self.rem > 0) as usize)) * 3
}
}
#[derive(Clone, Copy, Debug)]
pub struct NaiveConfig {
pub encode_padding: bool,
pub decode_allow_trailing_bits: bool,
pub decode_padding_mode: DecodePaddingMode,
}
impl Config for NaiveConfig {
fn encode_padding(&self) -> bool {
self.encode_padding
}
}

File diff suppressed because it is too large

@ -1,278 +0,0 @@
//! Correct, fast, and configurable [base64][] decoding and encoding. Base64
//! transports binary data efficiently in contexts where only plain text is
//! allowed.
//!
//! [base64]: https://developer.mozilla.org/en-US/docs/Glossary/Base64
//!
//! # Usage
//!
//! Use an [`Engine`] to decode or encode base64, configured with the base64
//! alphabet and padding behavior best suited to your application.
//!
//! ## Engine setup
//!
//! There is more than one way to encode a stream of bytes as “base64”.
//! Different applications use different encoding
//! [alphabets][alphabet::Alphabet] and
//! [padding behaviors][engine::general_purpose::GeneralPurposeConfig].
//!
//! ### Encoding alphabet
//!
//! Almost all base64 [alphabets][alphabet::Alphabet] use `A-Z`, `a-z`, and
//! `0-9`, which gives nearly 64 characters (26 + 26 + 10 = 62), but they differ
//! in their choice of the final 2.
//!
//! Most applications use the [standard][alphabet::STANDARD] alphabet specified
//! in [RFC 4648][rfc-alphabet]. If that’s all you need, you can get started
//! quickly by using the pre-configured
//! [`STANDARD`][engine::general_purpose::STANDARD] engine, which is also available
//! in the [`prelude`] module as shown here, if you prefer a minimal `use`
//! footprint.
//!
#![cfg_attr(feature = "alloc", doc = "```")]
#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
//! use base64::prelude::*;
//!
//! # fn main() -> Result<(), base64::DecodeError> {
//! assert_eq!(BASE64_STANDARD.decode(b"+uwgVQA=")?, b"\xFA\xEC\x20\x55\0");
//! assert_eq!(BASE64_STANDARD.encode(b"\xFF\xEC\x20\x55\0"), "/+wgVQA=");
//! # Ok(())
//! # }
//! ```
//!
//! [rfc-alphabet]: https://datatracker.ietf.org/doc/html/rfc4648#section-4
//!
//! Other common alphabets are available in the [`alphabet`] module.
//!
//! #### URL-safe alphabet
//!
//! The standard alphabet uses `+` and `/` as its two non-alphanumeric tokens,
//! which cannot be safely used in URLs without encoding them as `%2B` and
//! `%2F`.
//!
//! To avoid that, some applications use a [“URL-safe” alphabet][alphabet::URL_SAFE],
//! which uses `-` and `_` instead. To use that alternative alphabet, use the
//! [`URL_SAFE`][engine::general_purpose::URL_SAFE] engine. This example doesn't
//! use [`prelude`] to show what a more explicit `use` would look like.
//!
#![cfg_attr(feature = "alloc", doc = "```")]
#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
//! use base64::{engine::general_purpose::URL_SAFE, Engine as _};
//!
//! # fn main() -> Result<(), base64::DecodeError> {
//! assert_eq!(URL_SAFE.decode(b"-uwgVQA=")?, b"\xFA\xEC\x20\x55\0");
//! assert_eq!(URL_SAFE.encode(b"\xFF\xEC\x20\x55\0"), "_-wgVQA=");
//! # Ok(())
//! # }
//! ```
//!
//! ### Padding characters
//!
//! Each base64 character represents 6 bits (2⁶ = 64) of the original binary
//! data, and every 3 bytes of input binary data will encode to 4 base64
//! characters (8 bits × 3 = 6 bits × 4 = 24 bits).
//!
//! When the input is not an even multiple of 3 bytes in length, [canonical][]
//! base64 encoders insert padding characters at the end, so that the output
//! length is always a multiple of 4:
//!
//! [canonical]: https://datatracker.ietf.org/doc/html/rfc4648#section-3.5
//!
#![cfg_attr(feature = "alloc", doc = "```")]
#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
//! use base64::{engine::general_purpose::STANDARD, Engine as _};
//!
//! assert_eq!(STANDARD.encode(b""), "");
//! assert_eq!(STANDARD.encode(b"f"), "Zg==");
//! assert_eq!(STANDARD.encode(b"fo"), "Zm8=");
//! assert_eq!(STANDARD.encode(b"foo"), "Zm9v");
//! ```
//!
//! Canonical encoding ensures that base64 encodings will be exactly the same,
//! byte-for-byte, regardless of input length. But the `=` padding characters
//! aren’t necessary for decoding, and they may be omitted by using a
//! [`NO_PAD`][engine::general_purpose::NO_PAD] configuration:
//!
#![cfg_attr(feature = "alloc", doc = "```")]
#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
//! use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _};
//!
//! assert_eq!(STANDARD_NO_PAD.encode(b""), "");
//! assert_eq!(STANDARD_NO_PAD.encode(b"f"), "Zg");
//! assert_eq!(STANDARD_NO_PAD.encode(b"fo"), "Zm8");
//! assert_eq!(STANDARD_NO_PAD.encode(b"foo"), "Zm9v");
//! ```
//!
//! The pre-configured `NO_PAD` engines will reject inputs containing padding
//! `=` characters. To encode without padding and still accept padding while
//! decoding, create an [engine][engine::general_purpose::GeneralPurpose] with the
//! appropriate [padding mode][engine::DecodePaddingMode], e.g. `DecodePaddingMode::Indifferent`.
//!
#![cfg_attr(feature = "alloc", doc = "```")]
#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
//! # use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _};
//! assert_eq!(STANDARD_NO_PAD.decode(b"Zm8="), Err(base64::DecodeError::InvalidPadding));
//! ```
//!
//! ### Further customization
//!
//! Decoding and encoding behavior can be customized by creating an
//! [engine][engine::GeneralPurpose] with an [alphabet][alphabet::Alphabet] and
//! [padding configuration][engine::GeneralPurposeConfig]:
//!
#![cfg_attr(feature = "alloc", doc = "```")]
#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
//! use base64::{engine, alphabet, Engine as _};
//!
//! // bizarro-world base64: +/ as the first symbols instead of the last
//! let alphabet =
//! alphabet::Alphabet::new("+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
//! .unwrap();
//!
//! // a very weird config that encodes with padding but requires no padding when decoding...?
//! let crazy_config = engine::GeneralPurposeConfig::new()
//! .with_decode_allow_trailing_bits(true)
//! .with_encode_padding(true)
//! .with_decode_padding_mode(engine::DecodePaddingMode::RequireNone);
//!
//! let crazy_engine = engine::GeneralPurpose::new(&alphabet, crazy_config);
//!
//! let encoded = crazy_engine.encode(b"abc 123");
//!
//! ```
//!
//! ## Memory allocation
//!
//! The [decode][Engine::decode()] and [encode][Engine::encode()] engine methods
//! allocate memory for their results: `decode` returns a `Vec<u8>` and
//! `encode` returns a `String`. To instead decode or encode into a buffer that
//! you allocated, use one of the alternative methods:
//!
//! #### Decoding
//!
//! | Method | Output | Allocates memory |
//! | -------------------------- | ----------------------------- | ----------------------------- |
//! | [`Engine::decode`] | returns a new `Vec<u8>` | always |
//! | [`Engine::decode_vec`] | appends to provided `Vec<u8>` | if `Vec` lacks capacity |
//! | [`Engine::decode_slice`]   | writes to provided `&[u8]`    | never                         |
//!
//! #### Encoding
//!
//! | Method | Output | Allocates memory |
//! | -------------------------- | ---------------------------- | ------------------------------ |
//! | [`Engine::encode`] | returns a new `String` | always |
//! | [`Engine::encode_string`] | appends to provided `String` | if `String` lacks capacity |
//! | [`Engine::encode_slice`] | writes to provided `&[u8]` | never |
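//!
//! For example, a minimal sketch that encodes into a stack buffer without allocating
//! (standard engine assumed):
//!
//! ```
//! use base64::{engine::general_purpose::STANDARD, Engine as _};
//!
//! let mut buf = [0_u8; 8];
//! let len = STANDARD.encode_slice(b"hi", &mut buf).unwrap();
//! assert_eq!(b"aGk=", &buf[..len]);
//! ```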
//!
//! ## Input and output
//!
//! The `base64` crate can [decode][Engine::decode()] and
//! [encode][Engine::encode()] values in memory, or
//! [`DecoderReader`][read::DecoderReader] and
//! [`EncoderWriter`][write::EncoderWriter] provide streaming decoding and
//! encoding for any [readable][std::io::Read] or [writable][std::io::Write]
//! byte stream.
//!
//! #### Decoding
//!
#![cfg_attr(feature = "std", doc = "```")]
#![cfg_attr(not(feature = "std"), doc = "```ignore")]
//! # use std::io;
//! use base64::{engine::general_purpose::STANDARD, read::DecoderReader};
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let mut input = io::stdin();
//! let mut decoder = DecoderReader::new(&mut input, &STANDARD);
//! io::copy(&mut decoder, &mut io::stdout())?;
//! # Ok(())
//! # }
//! ```
//!
//! #### Encoding
//!
#![cfg_attr(feature = "std", doc = "```")]
#![cfg_attr(not(feature = "std"), doc = "```ignore")]
//! # use std::io;
//! use base64::{engine::general_purpose::STANDARD, write::EncoderWriter};
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let mut output = io::stdout();
//! let mut encoder = EncoderWriter::new(&mut output, &STANDARD);
//! io::copy(&mut io::stdin(), &mut encoder)?;
//! # Ok(())
//! # }
//! ```
//!
//! #### Display
//!
//! If you only need a base64 representation for implementing the
//! [`Display`][std::fmt::Display] trait, use
//! [`Base64Display`][display::Base64Display]:
//!
//! ```
//! use base64::{display::Base64Display, engine::general_purpose::STANDARD};
//!
//! let value = Base64Display::new(b"\0\x01\x02\x03", &STANDARD);
//! assert_eq!("base64: AAECAw==", format!("base64: {}", value));
//! ```
//!
//! # Panics
//!
//! If length calculations result in overflowing `usize`, a panic will result.
#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))]
#![deny(
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
unused_results,
variant_size_differences,
warnings
)]
#![forbid(unsafe_code)]
// Allow globally until https://github.com/rust-lang/rust-clippy/issues/8768 is resolved.
// The desired state is to allow it only for the rstest_reuse import.
#![allow(clippy::single_component_path_imports)]
#![cfg_attr(not(any(feature = "std", test)), no_std)]
#[cfg(any(feature = "alloc", test))]
extern crate alloc;
// has to be included at top level because of the way rstest_reuse defines its macros
#[cfg(test)]
use rstest_reuse;
mod chunked_encoder;
pub mod display;
#[cfg(any(feature = "std", test))]
pub mod read;
#[cfg(any(feature = "std", test))]
pub mod write;
pub mod engine;
pub use engine::Engine;
pub mod alphabet;
mod encode;
#[allow(deprecated)]
#[cfg(any(feature = "alloc", test))]
pub use crate::encode::{encode, encode_engine, encode_engine_string};
#[allow(deprecated)]
pub use crate::encode::{encode_engine_slice, encoded_len, EncodeSliceError};
mod decode;
#[allow(deprecated)]
#[cfg(any(feature = "alloc", test))]
pub use crate::decode::{decode, decode_engine, decode_engine_vec};
#[allow(deprecated)]
pub use crate::decode::{decode_engine_slice, decoded_len_estimate, DecodeError, DecodeSliceError};
pub mod prelude;
#[cfg(test)]
mod tests;
const PAD_BYTE: u8 = b'=';

@ -1,20 +0,0 @@
//! Preconfigured engines for common use cases.
//!
//! These are re-exports of `const` engines in [crate::engine::general_purpose], renamed with a `BASE64_`
//! prefix for those who prefer to `use` the entire path to a name.
//!
//! # Examples
//!
#![cfg_attr(feature = "alloc", doc = "```")]
#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
//! use base64::prelude::{Engine as _, BASE64_STANDARD_NO_PAD};
//!
//! assert_eq!("c29tZSBieXRlcw", &BASE64_STANDARD_NO_PAD.encode(b"some bytes"));
//! ```
pub use crate::engine::Engine;
pub use crate::engine::general_purpose::STANDARD as BASE64_STANDARD;
pub use crate::engine::general_purpose::STANDARD_NO_PAD as BASE64_STANDARD_NO_PAD;
pub use crate::engine::general_purpose::URL_SAFE as BASE64_URL_SAFE;
pub use crate::engine::general_purpose::URL_SAFE_NO_PAD as BASE64_URL_SAFE_NO_PAD;

@ -1,316 +0,0 @@
use crate::{engine::Engine, DecodeError, PAD_BYTE};
use std::{cmp, fmt, io};
// This should be large, but it has to fit on the stack.
pub(crate) const BUF_SIZE: usize = 1024;
// 4 bytes of base64 data encode 3 bytes of raw data (modulo padding).
const BASE64_CHUNK_SIZE: usize = 4;
const DECODED_CHUNK_SIZE: usize = 3;
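// e.g. (sketch) a full BUF_SIZE refill of 1024 base64 bytes decodes to at most
// 1024 / BASE64_CHUNK_SIZE * DECODED_CHUNK_SIZE = 768 bytes of raw data.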
/// A `Read` implementation that decodes base64 data read from an underlying reader.
///
/// # Examples
///
/// ```
/// use std::io::Read;
/// use std::io::Cursor;
/// use base64::engine::general_purpose;
///
/// // use a cursor as the simplest possible `Read` -- in real code this is probably a file, etc.
/// let mut wrapped_reader = Cursor::new(b"YXNkZg==");
/// let mut decoder = base64::read::DecoderReader::new(
/// &mut wrapped_reader,
/// &general_purpose::STANDARD);
///
/// // handle errors as you normally would
/// let mut result = Vec::new();
/// decoder.read_to_end(&mut result).unwrap();
///
/// assert_eq!(b"asdf", &result[..]);
///
/// ```
pub struct DecoderReader<'e, E: Engine, R: io::Read> {
engine: &'e E,
/// Where b64 data is read from
inner: R,
// Holds b64 data read from the delegate reader.
b64_buffer: [u8; BUF_SIZE],
// The start of the pending buffered data in b64_buffer.
b64_offset: usize,
// The amount of buffered b64 data.
b64_len: usize,
// Since the caller may provide us with a buffer of size 1 or 2 that's too small to copy a
// decoded chunk into, we have to be able to hang on to a few decoded bytes.
// Technically we only need to hold 2 bytes but then we'd need a separate temporary buffer to
// decode 3 bytes into and then juggle copying one byte into the provided read buf and the rest
// into here, which seems like a lot of complexity for 1 extra byte of storage.
decoded_buffer: [u8; DECODED_CHUNK_SIZE],
// index of start of decoded data
decoded_offset: usize,
// length of decoded data
decoded_len: usize,
// used to provide accurate offsets in errors
total_b64_decoded: usize,
// offset of previously seen padding, if any
padding_offset: Option<usize>,
}
impl<'e, E: Engine, R: io::Read> fmt::Debug for DecoderReader<'e, E, R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("DecoderReader")
.field("b64_offset", &self.b64_offset)
.field("b64_len", &self.b64_len)
.field("decoded_buffer", &self.decoded_buffer)
.field("decoded_offset", &self.decoded_offset)
.field("decoded_len", &self.decoded_len)
.field("total_b64_decoded", &self.total_b64_decoded)
.field("padding_offset", &self.padding_offset)
.finish()
}
}
impl<'e, E: Engine, R: io::Read> DecoderReader<'e, E, R> {
/// Create a new decoder that will read from the provided reader `r`.
pub fn new(reader: R, engine: &'e E) -> Self {
DecoderReader {
engine,
inner: reader,
b64_buffer: [0; BUF_SIZE],
b64_offset: 0,
b64_len: 0,
decoded_buffer: [0; DECODED_CHUNK_SIZE],
decoded_offset: 0,
decoded_len: 0,
total_b64_decoded: 0,
padding_offset: None,
}
}
/// Write as much as possible of the decoded buffer into the target buffer.
/// Must only be called when there is something to write and space to write into.
/// Returns a Result with the number of (decoded) bytes copied.
fn flush_decoded_buf(&mut self, buf: &mut [u8]) -> io::Result<usize> {
debug_assert!(self.decoded_len > 0);
debug_assert!(!buf.is_empty());
let copy_len = cmp::min(self.decoded_len, buf.len());
debug_assert!(copy_len > 0);
debug_assert!(copy_len <= self.decoded_len);
buf[..copy_len].copy_from_slice(
&self.decoded_buffer[self.decoded_offset..self.decoded_offset + copy_len],
);
self.decoded_offset += copy_len;
self.decoded_len -= copy_len;
debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE);
Ok(copy_len)
}
/// Read into the remaining space in the buffer after the current contents.
/// Must only be called when there is space to read into in the buffer.
/// Returns the number of bytes read.
fn read_from_delegate(&mut self) -> io::Result<usize> {
debug_assert!(self.b64_offset + self.b64_len < BUF_SIZE);
let read = self
.inner
.read(&mut self.b64_buffer[self.b64_offset + self.b64_len..])?;
self.b64_len += read;
debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
Ok(read)
}
/// Decode the requested number of bytes from the b64 buffer into the provided buffer. It's the
/// caller's responsibility to choose the number of b64 bytes to decode correctly.
///
/// Returns a Result with the number of decoded bytes written to `buf`.
fn decode_to_buf(&mut self, b64_len_to_decode: usize, buf: &mut [u8]) -> io::Result<usize> {
debug_assert!(self.b64_len >= b64_len_to_decode);
debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
debug_assert!(!buf.is_empty());
let b64_to_decode = &self.b64_buffer[self.b64_offset..self.b64_offset + b64_len_to_decode];
let decode_metadata = self
.engine
.internal_decode(
b64_to_decode,
buf,
self.engine.internal_decoded_len_estimate(b64_len_to_decode),
)
.map_err(|e| match e {
DecodeError::InvalidByte(offset, byte) => {
                    // This can be incorrect, but probably not in a way that matters to anyone:
                    // if padding was handled in a previous decode and we now get InvalidByte
                    // because of further padding, we should arguably report InvalidByte with
                    // PAD_BYTE at the original padding position (`self.padding_offset`). However,
                    // we have no good way to tie those two cases together, so we simply report
                    // the invalid byte as if the earlier padding, and the downgrade that may have
                    // turned this byte invalid, never happened.
DecodeError::InvalidByte(self.total_b64_decoded + offset, byte)
}
DecodeError::InvalidLength => DecodeError::InvalidLength,
DecodeError::InvalidLastSymbol(offset, byte) => {
DecodeError::InvalidLastSymbol(self.total_b64_decoded + offset, byte)
}
DecodeError::InvalidPadding => DecodeError::InvalidPadding,
})
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
if let Some(offset) = self.padding_offset {
// we've already seen padding
if decode_metadata.decoded_len > 0 {
// we read more after already finding padding; report error at first padding byte
return Err(io::Error::new(
io::ErrorKind::InvalidData,
DecodeError::InvalidByte(offset, PAD_BYTE),
));
}
}
self.padding_offset = self.padding_offset.or(decode_metadata
.padding_offset
.map(|offset| self.total_b64_decoded + offset));
self.total_b64_decoded += b64_len_to_decode;
self.b64_offset += b64_len_to_decode;
self.b64_len -= b64_len_to_decode;
debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
Ok(decode_metadata.decoded_len)
}
/// Unwraps this `DecoderReader`, returning the base reader which it reads base64 encoded
/// input from.
///
/// Because `DecoderReader` performs internal buffering, the state of the inner reader is
/// unspecified. This function is mainly provided because the inner reader type may provide
/// additional functionality beyond the `Read` implementation which may still be useful.
pub fn into_inner(self) -> R {
self.inner
}
}
impl<'e, E: Engine, R: io::Read> io::Read for DecoderReader<'e, E, R> {
/// Decode input from the wrapped reader.
///
/// Under non-error circumstances, this returns `Ok` with the value being the number of bytes
/// written in `buf`.
///
/// Where possible, this function buffers base64 to minimize the number of read() calls to the
/// delegate reader.
///
/// # Errors
///
/// Any errors emitted by the delegate reader are returned. Decoding errors due to invalid
/// base64 are also possible, and will have `io::ErrorKind::InvalidData`.
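    ///
    /// # Example
    ///
    /// An illustrative sketch added here for clarity (not upstream documentation); it assumes
    /// the general-purpose `STANDARD` engine:
    ///
    /// ```
    /// use std::io::Read;
    /// use base64::{engine::general_purpose::STANDARD, read::DecoderReader};
    ///
    /// // '!' is not a valid base64 symbol, so decoding fails with `InvalidData`.
    /// let mut decoder = DecoderReader::new(&b"!!!!"[..], &STANDARD);
    /// let mut out = Vec::new();
    /// let err = decoder.read_to_end(&mut out).unwrap_err();
    /// assert_eq!(std::io::ErrorKind::InvalidData, err.kind());
    /// ```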
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if buf.is_empty() {
return Ok(0);
}
// offset == BUF_SIZE when we copied it all last time
debug_assert!(self.b64_offset <= BUF_SIZE);
debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
debug_assert!(if self.b64_offset == BUF_SIZE {
self.b64_len == 0
} else {
self.b64_len <= BUF_SIZE
});
debug_assert!(if self.decoded_len == 0 {
// can be = when we were able to copy the complete chunk
self.decoded_offset <= DECODED_CHUNK_SIZE
} else {
self.decoded_offset < DECODED_CHUNK_SIZE
});
// We shouldn't ever decode into decoded_buffer when we can't immediately write at least one
// byte into the provided buf, so the effective length should only be 3 momentarily between
// when we decode and when we copy into the target buffer.
debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE);
debug_assert!(self.decoded_len + self.decoded_offset <= DECODED_CHUNK_SIZE);
if self.decoded_len > 0 {
// we have a few leftover decoded bytes; flush that rather than pull in more b64
self.flush_decoded_buf(buf)
} else {
let mut at_eof = false;
while self.b64_len < BASE64_CHUNK_SIZE {
// Copy any bytes we have to the start of the buffer.
self.b64_buffer
.copy_within(self.b64_offset..self.b64_offset + self.b64_len, 0);
self.b64_offset = 0;
// then fill in more data
let read = self.read_from_delegate()?;
if read == 0 {
// we never read into an empty buf, so 0 => we've hit EOF
at_eof = true;
break;
}
}
if self.b64_len == 0 {
debug_assert!(at_eof);
// we must be at EOF, and we have no data left to decode
return Ok(0);
};
debug_assert!(if at_eof {
// if we are at eof, we may not have a complete chunk
self.b64_len > 0
} else {
// otherwise, we must have at least one chunk
self.b64_len >= BASE64_CHUNK_SIZE
});
debug_assert_eq!(0, self.decoded_len);
if buf.len() < DECODED_CHUNK_SIZE {
// caller requested an annoyingly short read
// have to write to a tmp buf first to avoid double mutable borrow
let mut decoded_chunk = [0_u8; DECODED_CHUNK_SIZE];
                // If we are at EOF, we could have fewer than BASE64_CHUNK_SIZE bytes left, in
                // which case we have to assume that these last few tokens are, in fact, valid
                // (i.e. there must be 2-4 b64 tokens, not 1, since 1 token can't decode to even
                // 1 byte).
let to_decode = cmp::min(self.b64_len, BASE64_CHUNK_SIZE);
let decoded = self.decode_to_buf(to_decode, &mut decoded_chunk[..])?;
self.decoded_buffer[..decoded].copy_from_slice(&decoded_chunk[..decoded]);
self.decoded_offset = 0;
self.decoded_len = decoded;
// can be less than 3 on last block due to padding
debug_assert!(decoded <= 3);
self.flush_decoded_buf(buf)
} else {
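                // Worked example (illustrative, using the usual 3-byte decoded / 4-byte base64
                // chunk sizes): a 10-byte `buf` has room for 10 / 3 = 3 complete decoded chunks,
                // so at most 3 * 4 = 12 base64 bytes should be decoded into it.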
let b64_bytes_that_can_decode_into_buf = (buf.len() / DECODED_CHUNK_SIZE)
.checked_mul(BASE64_CHUNK_SIZE)
.expect("too many chunks");
debug_assert!(b64_bytes_that_can_decode_into_buf >= BASE64_CHUNK_SIZE);
let b64_bytes_available_to_decode = if at_eof {
self.b64_len
} else {
// only use complete chunks
self.b64_len - self.b64_len % 4
};
let actual_decode_len = cmp::min(
b64_bytes_that_can_decode_into_buf,
b64_bytes_available_to_decode,
);
self.decode_to_buf(actual_decode_len, buf)
}
}
}
}
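
// Illustrative test sketch (not part of the upstream crate): it exercises the short-read path of
// `read`, where the caller's buffer is smaller than DECODED_CHUNK_SIZE and decoded bytes are
// staged in `decoded_buffer` before being flushed. It assumes the crate's general-purpose
// `STANDARD` engine.
#[cfg(test)]
mod short_read_sketch {
    use super::*;
    use crate::engine::general_purpose::STANDARD;
    use std::io::Read;

    #[test]
    fn one_byte_reads_reassemble_the_input() {
        let encoded = b"aGVsbG8gd29ybGQ=";
        let mut decoder = DecoderReader::new(&encoded[..], &STANDARD);

        // Read a single byte at a time so every call takes the
        // `buf.len() < DECODED_CHUNK_SIZE` branch and drains `decoded_buffer`.
        let mut out = Vec::new();
        let mut byte = [0_u8; 1];
        loop {
            match decoder.read(&mut byte).unwrap() {
                0 => break,
                n => out.extend_from_slice(&byte[..n]),
            }
        }
        assert_eq!(b"hello world".to_vec(), out);
    }
}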
