rust: Fully remove failure crate
I previously ran out of steam partway through the switch and wanted to get the PR out for feedback before continuing, but it turns out I had stopped basically two meters from the finish line. Completing the switch from `failure` → `anyhow` was quite easy.
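For reviewers, the mapping is almost entirely mechanical: `failure::Fallible<T>` becomes `anyhow::Result<T>`, and `failure`'s `ResultExt::with_context` (whose closure receives the underlying error, so messages interpolated it by hand) becomes `anyhow`'s `Context::context`/`with_context` (which keep the source error in the chain). A minimal sketch of the new pattern, using a hypothetical `load_config` helper that is not part of this commit:

    use anyhow::{Context, Result};

    // Hypothetical helper, for illustration only.
    fn load_config(path: &str) -> Result<String> {
        // Under failure this would have been:
        //   .with_context(|e| format!("reading {}: {}", path, e))
        // With anyhow the source error stays in the chain, so the
        // message no longer needs to interpolate it by hand:
        std::fs::read_to_string(path).with_context(|| format!("reading {}", path))
    }

    fn main() -> Result<()> {
        let cfg = load_config("/etc/example.conf")?;
        println!("read {} bytes", cfg.len());
        Ok(())
    }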
parent 430f92c382
commit bf8f295122
@@ -5,8 +5,6 @@ authors = ["Colin Walters <walters@verbum.org>", "Jonathan Lebon <jonathan@jlebo
 edition = "2018"
 
 [dependencies]
-# TODO remove this
-failure = "0.1.7"
 anyhow = "1.0"
 serde = "1.0.105"
 serde_derive = "1.0.105"

@@ -4,7 +4,7 @@
  * SPDX-License-Identifier: Apache-2.0 OR MIT
  */
 
-use failure::Fallible;
+use anyhow::Result;
 use openat;
 use rayon::prelude::*;
 use std::io;
@@ -23,7 +23,7 @@ use crate::utils;
 // https://pagure.io/workstation-ostree-config/pull-request/121
 // https://discussion.fedoraproject.org/t/adapting-user-home-in-etc-passwd/487/6
 // https://github.com/justjanne/powerline-go/issues/94
-fn postprocess_useradd(rootfs_dfd: &openat::Dir) -> Fallible<()> {
+fn postprocess_useradd(rootfs_dfd: &openat::Dir) -> Result<()> {
     let path = Path::new("usr/etc/default/useradd");
     if let Some(f) = rootfs_dfd.open_file_optional(path)? {
         let f = io::BufReader::new(f);
@@ -48,7 +48,7 @@ fn postprocess_useradd(rootfs_dfd: &openat::Dir) -> Fallible<()> {
 // We keep hitting issues with the ostree-remount preset not being
 // enabled; let's just do this rather than trying to propagate the
 // preset everywhere.
-fn postprocess_presets(rootfs_dfd: &openat::Dir) -> Fallible<()> {
+fn postprocess_presets(rootfs_dfd: &openat::Dir) -> Result<()> {
     let mut o = rootfs_dfd.write_file(
         "usr/lib/systemd/system-preset/40-rpm-ostree-auto.preset",
         0o644,
@@ -68,7 +68,7 @@ enable ostree-finalize-staged.path
 // and (2) make sure there *isn't* a /var/home -> /home substition rule. The latter check won't
 // technically be needed once downstreams have:
 // https://src.fedoraproject.org/rpms/selinux-policy/pull-request/14
-fn postprocess_subs_dist(rootfs_dfd: &openat::Dir) -> Fallible<()> {
+fn postprocess_subs_dist(rootfs_dfd: &openat::Dir) -> Result<()> {
     let path = Path::new("usr/etc/selinux/targeted/contexts/files/file_contexts.subs_dist");
     if let Some(f) = rootfs_dfd.open_file_optional(path)? {
         let f = io::BufReader::new(f);
@@ -95,7 +95,7 @@ fn postprocess_subs_dist(rootfs_dfd: &openat::Dir) -> Fallible<()> {
 
 // This function is called from rpmostree_postprocess_final(); think of
 // it as the bits of that function that we've chosen to implement in Rust.
-fn compose_postprocess_final(rootfs_dfd: &openat::Dir) -> Fallible<()> {
+fn compose_postprocess_final(rootfs_dfd: &openat::Dir) -> Result<()> {
     let tasks = [
         postprocess_useradd,
         postprocess_presets,

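The `compose_postprocess_final` hunk hints at a nice property of the uniform return type: the postprocessing steps all share the signature `fn(&openat::Dir) -> Result<()>`, so they can be collected into a plain array and driven uniformly (and this file imports `rayon::prelude`, so they can be run in parallel). A standalone sketch of that shape, with dummy tasks and a `&str` root standing in for the real `openat::Dir`:

    use anyhow::Result;
    use rayon::prelude::*;

    // Dummy tasks for illustration; the real ones are postprocess_* above.
    fn task_a(root: &str) -> Result<()> { println!("a: {}", root); Ok(()) }
    fn task_b(root: &str) -> Result<()> { println!("b: {}", root); Ok(()) }

    fn main() -> Result<()> {
        // One shared signature makes an array of fn pointers possible.
        let tasks: [fn(&str) -> Result<()>; 2] = [task_a, task_b];
        // try_for_each short-circuits on the first Err.
        tasks.par_iter().try_for_each(|t| t("/rootfs"))?;
        Ok(())
    }
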
@@ -11,7 +11,7 @@
 //! is here in rpm-ostree as a convenience.
 
 use std::os::unix::prelude::*;
-use failure::{Fallible, ResultExt};
+use anyhow::{Result, Context};
 use structopt::StructOpt;
 use openat;
 use nix;
@@ -74,7 +74,7 @@ fn to_cstr<P: openat::AsPath>(path: P) -> std::io::Result<P::Buffer> {
 }
 
 /// Set the immutable bit
-fn seal(opts: &SealOpts) -> Fallible<()> {
+fn seal(opts: &SealOpts) -> Result<()> {
     let fd = unsafe {
         let fd = libc::open(to_cstr(opts.sysroot.as_str())?.as_ref().as_ptr(), libc::O_CLOEXEC | libc::O_DIRECTORY);
         if fd < 0 {
@@ -94,10 +94,10 @@ fn seal(opts: &SealOpts) -> Fallible<()> {
 }
 
 /// Main entrypoint
-fn coreos_rootfs_main(args: &Vec<String>) -> Fallible<()> {
+fn coreos_rootfs_main(args: &Vec<String>) -> Result<()> {
     let opt = Opt::from_iter(args.iter());
     match opt {
-        Opt::Seal(ref opts) => seal(opts).with_context(|e| format!("Sealing: {}", e.to_string()))?,
+        Opt::Seal(ref opts) => seal(opts).context("Sealing rootfs failed")?,
     };
     Ok(())
 }

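The one non-mechanical change above is in `coreos_rootfs_main`: `failure`'s `with_context` closure received the error and formatted it into the message, while `anyhow` keeps the source error in the chain behind a static message. A small sketch (not from this repo) of how the chained cause still surfaces, assuming the underlying call fails:

    use anyhow::{Context, Result};

    fn main() {
        // Hypothetical failing call, for illustration only.
        let r: Result<Vec<u8>> = std::fs::read("/nonexistent").context("Sealing rootfs failed");
        if let Err(e) = r {
            // "{:#}" renders the full chain, e.g.
            // "Sealing rootfs failed: No such file or directory (os error 2)"
            eprintln!("error: {:#}", e);
        }
    }
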
@@ -52,7 +52,7 @@
 //! than scanning the whole journal upfront. This can then be e.g. piped through
 //! a pager, stopped after N entries, etc...
 
-use failure::{bail, Fallible};
+use anyhow::{Result, bail};
 use openat::{self, Dir, SimpleType};
 use std::collections::VecDeque;
 use std::ffi::CString;
@@ -168,7 +168,7 @@ enum JournalSearchMode {
 }
 
 #[cfg(not(test))]
-fn journal_record_timestamp(journal: &journal::Journal) -> Fallible<u64> {
+fn journal_record_timestamp(journal: &journal::Journal) -> Result<u64> {
     Ok(journal
         .timestamp()?
         .duration_since(std::time::UNIX_EPOCH)?
@@ -176,7 +176,7 @@ fn journal_record_timestamp(journal: &journal::Journal) -> Fallible<u64> {
 }
 
 #[cfg(test)]
-fn journal_record_timestamp(journal: &journal::Journal) -> Fallible<u64> {
+fn journal_record_timestamp(journal: &journal::Journal) -> Result<u64> {
     Ok(journal.current_timestamp.unwrap())
 }
 
@@ -187,7 +187,7 @@ where
     s.and_then(|s| s.parse::<u64>().ok())
 }
 
-fn history_get_oldest_deployment_msg_timestamp() -> Fallible<Option<u64>> {
+fn history_get_oldest_deployment_msg_timestamp() -> Result<Option<u64>> {
     let mut journal = journal::Journal::open(journal::JournalFiles::System, false, true)?;
     journal.seek(journal::JournalSeek::Head)?;
     journal.match_add("MESSAGE_ID", RPMOSTREE_DEPLOY_MSG)?;
@@ -202,7 +202,7 @@ fn history_get_oldest_deployment_msg_timestamp() -> Fallible<Option<u64>> {
 /// Gets the oldest deployment message in the journal, and nuke all the GVariant data files
 /// that correspond to deployments older than that one. Essentially, this binds pruning to
 /// journal pruning. Called from C through `ror_history_prune()`.
-fn history_prune() -> Fallible<()> {
+fn history_prune() -> Result<()> {
     if !Path::new(RPMOSTREE_HISTORY_DIR).exists() {
         return Ok(())
     }
@@ -238,7 +238,7 @@ fn history_prune() -> Fallible<()> {
 
 impl HistoryCtx {
     /// Create a new context object. Called from C through `ror_history_ctx_new()`.
-    fn new_boxed() -> Fallible<Box<HistoryCtx>> {
+    fn new_boxed() -> Result<Box<HistoryCtx>> {
         let mut journal = journal::Journal::open(journal::JournalFiles::System, false, true)?;
         journal.seek(journal::JournalSeek::Tail)?;
 
@@ -252,7 +252,7 @@ impl HistoryCtx {
     }
 
     /// Ensures the journal filters are set up for the messages we're interested in.
-    fn set_search_mode(&mut self, mode: JournalSearchMode) -> Fallible<()> {
+    fn set_search_mode(&mut self, mode: JournalSearchMode) -> Result<()> {
         if Some(&mode) != self.search_mode.as_ref() {
             self.journal.match_flush()?;
             self.journal.match_add("MESSAGE_ID", OSTREE_BOOT_MSG)?;
@@ -266,7 +266,7 @@ impl HistoryCtx {
 
     /// Creates a marker from an OSTree boot message. Uses the timestamp of the message
     /// itself as the boot time. Returns None if record is incomplete.
-    fn boot_record_to_marker(&self, record: &JournalRecord) -> Fallible<Option<Marker>> {
+    fn boot_record_to_marker(&self, record: &JournalRecord) -> Result<Option<Marker>> {
         if let (Some(path), Some(device), Some(inode)) = (
             record.get("DEPLOYMENT_PATH"),
             map_to_u64(record.get("DEPLOYMENT_DEVICE")),
@@ -284,7 +284,7 @@ impl HistoryCtx {
     /// Creates a marker from an RPM-OSTree deploy message. Uses the `DEPLOYMENT_TIMESTAMP`
     /// in the message as the deploy time. This matches the history gv filename for that
     /// deployment. Returns None if record is incomplete.
-    fn deployment_record_to_marker(&self, record: &JournalRecord) -> Fallible<Option<Marker>> {
+    fn deployment_record_to_marker(&self, record: &JournalRecord) -> Result<Option<Marker>> {
         if let (Some(timestamp), Some(device), Some(inode), Some(path)) = (
             map_to_u64(record.get("DEPLOYMENT_TIMESTAMP")),
             map_to_u64(record.get("DEPLOYMENT_DEVICE")),
@@ -304,7 +304,7 @@ impl HistoryCtx {
     }
 
     /// Goes to the next OSTree boot msg in the journal and returns its marker.
-    fn find_next_boot_marker(&mut self) -> Fallible<Option<BootMarker>> {
+    fn find_next_boot_marker(&mut self) -> Result<Option<BootMarker>> {
         self.set_search_mode(JournalSearchMode::BootMsgs)?;
         while let Some(rec) = self.journal.previous_record()? {
             if let Some(Marker::Boot(m)) = self.boot_record_to_marker(&rec)? {
@@ -315,7 +315,7 @@ impl HistoryCtx {
     }
 
     /// Returns a marker of the appropriate kind for a given journal message.
-    fn record_to_marker(&self, record: &JournalRecord) -> Fallible<Option<Marker>> {
+    fn record_to_marker(&self, record: &JournalRecord) -> Result<Option<Marker>> {
         Ok(match record.get("MESSAGE_ID").unwrap() {
             m if m == OSTREE_BOOT_MSG => self.boot_record_to_marker(&record)?,
             m if m == RPMOSTREE_DEPLOY_MSG => self.deployment_record_to_marker(&record)?,
@@ -325,7 +325,7 @@ impl HistoryCtx {
 
     /// Goes to the next OSTree boot or RPM-OSTree deploy msg in the journal, creates a
     /// marker for it, and returns it.
-    fn find_next_marker(&mut self) -> Fallible<Option<Marker>> {
+    fn find_next_marker(&mut self) -> Result<Option<Marker>> {
         self.set_search_mode(JournalSearchMode::BootAndDeploymentMsgs)?;
         while let Some(rec) = self.journal.previous_record()? {
             if let Some(marker) = self.record_to_marker(&rec)? {
@@ -336,7 +336,7 @@ impl HistoryCtx {
     }
 
     /// Finds the matching deployment marker for the next boot marker in the queue.
-    fn scan_until_path_match(&mut self) -> Fallible<Option<(BootMarker, DeploymentMarker)>> {
+    fn scan_until_path_match(&mut self) -> Result<Option<(BootMarker, DeploymentMarker)>> {
         // keep popping & scanning until we get to the next boot marker
         let boot_marker = loop {
             match self.marker_queue.pop_front() {
@@ -376,7 +376,7 @@ impl HistoryCtx {
 
     /// Returns the next history entry, which consists of a boot timestamp and its matching
     /// deploy timestamp.
-    fn scan_until_next_entry(&mut self) -> Fallible<Option<HistoryEntry>> {
+    fn scan_until_next_entry(&mut self) -> Result<Option<HistoryEntry>> {
         while let Some((boot_marker, deployment_marker)) = self.scan_until_path_match()? {
             if boot_marker.node != deployment_marker.node {
                 // This is a non-foolproof safety valve to ensure that the boot is definitely
@@ -399,7 +399,7 @@ impl HistoryCtx {
     /// of the same deployment into a single entry. The `boot_count` field represents the
     /// number of boots squashed, and `*_boot_timestamp` fields provide the timestamp of the
     /// first and last boots.
-    fn scan_until_next_new_entry(&mut self) -> Fallible<Option<HistoryEntry>> {
+    fn scan_until_next_new_entry(&mut self) -> Result<Option<HistoryEntry>> {
         while let Some(entry) = self.scan_until_next_entry()? {
             if self.current_entry.is_none() {
                 /* first scan ever; prime with first entry */
@@ -426,7 +426,7 @@ impl HistoryCtx {
     /// Returns the next entry. This is a thin wrapper around `scan_until_next_new_entry`
     /// that mostly just handles the `Option` -> EOF conversion for the C side. Called from
     /// C through `ror_history_ctx_next()`.
-    fn next_entry(&mut self) -> Fallible<HistoryEntry> {
+    fn next_entry(&mut self) -> Result<HistoryEntry> {
         if self.reached_eof {
             bail!("next_entry() called after having reached EOF!")
         }
@@ -445,7 +445,7 @@ impl HistoryCtx {
 /// stuff in the host journal; in fact without needing any system journal access at all.
 #[cfg(test)]
 mod mock_journal {
-    use super::Fallible;
+    use super::Result;
     pub use systemd::journal::{JournalFiles, JournalRecord, JournalSeek};
 
     pub struct Journal {
@@ -455,25 +455,25 @@ mod mock_journal {
     }
 
     impl Journal {
-        pub fn open(_: JournalFiles, _: bool, _: bool) -> Fallible<Journal> {
+        pub fn open(_: JournalFiles, _: bool, _: bool) -> Result<Journal> {
             Ok(Journal {
                 entries: Vec::new(),
                 current_timestamp: None,
                 msg_ids: Vec::new(),
             })
         }
-        pub fn seek(&mut self, _: JournalSeek) -> Fallible<()> {
+        pub fn seek(&mut self, _: JournalSeek) -> Result<()> {
             Ok(())
         }
-        pub fn match_flush(&mut self) -> Fallible<()> {
+        pub fn match_flush(&mut self) -> Result<()> {
             self.msg_ids.clear();
             Ok(())
         }
-        pub fn match_add(&mut self, _: &str, msg_id: &str) -> Fallible<()> {
+        pub fn match_add(&mut self, _: &str, msg_id: &str) -> Result<()> {
             self.msg_ids.push(msg_id.into());
             Ok(())
         }
-        pub fn previous_record(&mut self) -> Fallible<Option<JournalRecord>> {
+        pub fn previous_record(&mut self) -> Result<Option<JournalRecord>> {
             while let Some((timestamp, record)) = self.entries.pop() {
                 if self.msg_ids.contains(record.get("MESSAGE_ID").unwrap()) {
                     self.current_timestamp = Some(timestamp);
@@ -483,7 +483,7 @@ mod mock_journal {
             Ok(None)
         }
         // This is only used by the prune path, which we're not unit testing.
-        pub fn next_record(&mut self) -> Fallible<Option<JournalRecord>> {
+        pub fn next_record(&mut self) -> Result<Option<JournalRecord>> {
             unimplemented!();
         }
     }

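One detail worth noting in the history code: both crates export a `bail!` macro with the same early-return semantics, which is why the call site in `next_entry` compiles unchanged once the import is swapped. A simplified stand-in (not the real method) showing the pattern:

    use anyhow::{bail, Result};

    // Simplified stand-in for HistoryCtx::next_entry, for illustration only.
    fn next_entry(reached_eof: bool) -> Result<u32> {
        // bail! expands to: return Err(anyhow!(...))
        if reached_eof {
            bail!("next_entry() called after having reached EOF!");
        }
        Ok(42)
    }
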
@@ -4,7 +4,7 @@
  * SPDX-License-Identifier: Apache-2.0 OR MIT
  */
 
-use failure::Fallible;
+use anyhow::Result;
 use systemd::id128::Id128;
 use systemd::journal;
 
@@ -12,7 +12,7 @@ static OSTREE_FINALIZE_STAGED_SERVICE: &'static str = "ostree-finalize-staged.se
 static OSTREE_DEPLOYMENT_FINALIZING_MSG_ID: &'static str = "e8646cd63dff4625b77909a8e7a40994";
 static OSTREE_DEPLOYMENT_COMPLETE_MSG_ID: &'static str = "dd440e3e549083b63d0efc7dc15255f1";
 
-fn print_staging_failure_msg(msg: Option<&str>) -> Fallible<()> {
+fn print_staging_failure_msg(msg: Option<&str>) -> Result<()> {
     println!("Warning: failed to finalize previous deployment");
     if let Some(msg) = msg {
         println!(" {}", msg);
@@ -25,7 +25,7 @@ fn print_staging_failure_msg(msg: Option<&str>) -> Fallible<()> {
 }
 
 /// Look for a failure from ostree-finalized-stage.service in the journal of the previous boot.
-fn journal_print_staging_failure() -> Fallible<()> {
+fn journal_print_staging_failure() -> Result<()> {
     let mut j = journal::Journal::open(journal::JournalFiles::System, false, true)?;
 
     // first, go to the first entry of the current boot