From bf8f2951224b3c76db061ec5222cf3f0fb06c5e6 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 1 Apr 2020 12:03:32 +0000
Subject: [PATCH] rust: Fully remove failure crate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

I previously ran out of steam in the switch and wanted to get the PR out
for feedback before continuing, but it turns out I basically stopped
2 meters from the finish line.  Completing the switch from
`failure` → `anyhow` was quite easy.
---
 rust/Cargo.toml           |  2 --
 rust/src/composepost.rs   | 10 ++++-----
 rust/src/coreos_rootfs.rs |  8 +++----
 rust/src/history.rs       | 46 +++++++++++++++++++--------------------
 rust/src/journal.rs       |  6 ++---
 5 files changed, 35 insertions(+), 37 deletions(-)

diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index f6e8c7e2..1d77b3e6 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -5,8 +5,6 @@
 authors = ["Colin Walters ", "Jonathan Lebon
diff --git a/rust/src/composepost.rs b/rust/src/composepost.rs
--- a/rust/src/composepost.rs
+++ b/rust/src/composepost.rs
-fn postprocess_useradd(rootfs_dfd: &openat::Dir) -> Fallible<()> {
+fn postprocess_useradd(rootfs_dfd: &openat::Dir) -> Result<()> {
     let path = Path::new("usr/etc/default/useradd");
     if let Some(f) = rootfs_dfd.open_file_optional(path)? {
         let f = io::BufReader::new(f);
@@ -48,7 +48,7 @@ fn postprocess_useradd(rootfs_dfd: &openat::Dir) -> Fallible<()> {
 // We keep hitting issues with the ostree-remount preset not being
 // enabled; let's just do this rather than trying to propagate the
 // preset everywhere.
-fn postprocess_presets(rootfs_dfd: &openat::Dir) -> Fallible<()> {
+fn postprocess_presets(rootfs_dfd: &openat::Dir) -> Result<()> {
     let mut o = rootfs_dfd.write_file(
         "usr/lib/systemd/system-preset/40-rpm-ostree-auto.preset",
         0o644,
@@ -68,7 +68,7 @@ enable ostree-finalize-staged.path
 // and (2) make sure there *isn't* a /var/home -> /home substition rule. The latter check won't
 // technically be needed once downstreams have:
 // https://src.fedoraproject.org/rpms/selinux-policy/pull-request/14
-fn postprocess_subs_dist(rootfs_dfd: &openat::Dir) -> Fallible<()> {
+fn postprocess_subs_dist(rootfs_dfd: &openat::Dir) -> Result<()> {
     let path = Path::new("usr/etc/selinux/targeted/contexts/files/file_contexts.subs_dist");
     if let Some(f) = rootfs_dfd.open_file_optional(path)? {
         let f = io::BufReader::new(f);
@@ -95,7 +95,7 @@ fn postprocess_subs_dist(rootfs_dfd: &openat::Dir) -> Fallible<()> {
 // This function is called from rpmostree_postprocess_final(); think of
 // it as the bits of that function that we've chosen to implement in Rust.
-fn compose_postprocess_final(rootfs_dfd: &openat::Dir) -> Fallible<()> {
+fn compose_postprocess_final(rootfs_dfd: &openat::Dir) -> Result<()> {
     let tasks = [
         postprocess_useradd,
         postprocess_presets,
diff --git a/rust/src/coreos_rootfs.rs b/rust/src/coreos_rootfs.rs
index a4973951..5d82ba41 100644
--- a/rust/src/coreos_rootfs.rs
+++ b/rust/src/coreos_rootfs.rs
@@ -11,7 +11,7 @@
 //! is here in rpm-ostree as a convenience.
 use std::os::unix::prelude::*;
-use failure::{Fallible, ResultExt};
+use anyhow::{Result, Context};
 use structopt::StructOpt;
 use openat;
 use nix;
@@ -74,7 +74,7 @@ fn to_cstr(path: P) -> std::io::Result {
 }

 /// Set the immutable bit
-fn seal(opts: &SealOpts) -> Fallible<()> {
+fn seal(opts: &SealOpts) -> Result<()> {
     let fd = unsafe {
         let fd = libc::open(to_cstr(opts.sysroot.as_str())?.as_ref().as_ptr(), libc::O_CLOEXEC | libc::O_DIRECTORY);
         if fd < 0 {
@@ -94,10 +94,10 @@ fn seal(opts: &SealOpts) -> Fallible<()> {
 }

 /// Main entrypoint
-fn coreos_rootfs_main(args: &Vec) -> Fallible<()> {
+fn coreos_rootfs_main(args: &Vec) -> Result<()> {
     let opt = Opt::from_iter(args.iter());
     match opt {
-        Opt::Seal(ref opts) => seal(opts).with_context(|e| format!("Sealing: {}", e.to_string()))?,
+        Opt::Seal(ref opts) => seal(opts).context("Sealing rootfs failed")?,
     };
     Ok(())
 }
diff --git a/rust/src/history.rs b/rust/src/history.rs
index 3b43784c..caf493af 100644
--- a/rust/src/history.rs
+++ b/rust/src/history.rs
@@ -52,7 +52,7 @@
 //! than scanning the whole journal upfront. This can then be e.g. piped through
 //! a pager, stopped after N entries, etc...

-use failure::{bail, Fallible};
+use anyhow::{Result, bail};
 use openat::{self, Dir, SimpleType};
 use std::collections::VecDeque;
 use std::ffi::CString;
@@ -168,7 +168,7 @@ enum JournalSearchMode {
 }

 #[cfg(not(test))]
-fn journal_record_timestamp(journal: &journal::Journal) -> Fallible {
+fn journal_record_timestamp(journal: &journal::Journal) -> Result {
     Ok(journal
         .timestamp()?
         .duration_since(std::time::UNIX_EPOCH)?
@@ -176,7 +176,7 @@ fn journal_record_timestamp(journal: &journal::Journal) -> Fallible {
 }

 #[cfg(test)]
-fn journal_record_timestamp(journal: &journal::Journal) -> Fallible {
+fn journal_record_timestamp(journal: &journal::Journal) -> Result {
     Ok(journal.current_timestamp.unwrap())
 }
@@ -187,7 +187,7 @@ where
     s.and_then(|s| s.parse::<u64>().ok())
 }

-fn history_get_oldest_deployment_msg_timestamp() -> Fallible> {
+fn history_get_oldest_deployment_msg_timestamp() -> Result> {
     let mut journal = journal::Journal::open(journal::JournalFiles::System, false, true)?;
     journal.seek(journal::JournalSeek::Head)?;
     journal.match_add("MESSAGE_ID", RPMOSTREE_DEPLOY_MSG)?;
@@ -202,7 +202,7 @@ fn history_get_oldest_deployment_msg_timestamp() -> Fallible> {
 /// Gets the oldest deployment message in the journal, and nuke all the GVariant data files
 /// that correspond to deployments older than that one. Essentially, this binds pruning to
 /// journal pruning. Called from C through `ror_history_prune()`.
-fn history_prune() -> Fallible<()> {
+fn history_prune() -> Result<()> {
     if !Path::new(RPMOSTREE_HISTORY_DIR).exists() {
         return Ok(())
     }
@@ -238,7 +238,7 @@ fn history_prune() -> Fallible<()> {
 impl HistoryCtx {
     /// Create a new context object. Called from C through `ror_history_ctx_new()`.
-    fn new_boxed() -> Fallible> {
+    fn new_boxed() -> Result> {
         let mut journal = journal::Journal::open(journal::JournalFiles::System, false, true)?;
         journal.seek(journal::JournalSeek::Tail)?;
@@ -252,7 +252,7 @@ impl HistoryCtx {
     }

     /// Ensures the journal filters are set up for the messages we're interested in.
-    fn set_search_mode(&mut self, mode: JournalSearchMode) -> Fallible<()> {
+    fn set_search_mode(&mut self, mode: JournalSearchMode) -> Result<()> {
         if Some(&mode) != self.search_mode.as_ref() {
             self.journal.match_flush()?;
             self.journal.match_add("MESSAGE_ID", OSTREE_BOOT_MSG)?;
@@ -266,7 +266,7 @@ impl HistoryCtx {
     /// Creates a marker from an OSTree boot message. Uses the timestamp of the message
     /// itself as the boot time. Returns None if record is incomplete.
-    fn boot_record_to_marker(&self, record: &JournalRecord) -> Fallible> {
+    fn boot_record_to_marker(&self, record: &JournalRecord) -> Result> {
         if let (Some(path), Some(device), Some(inode)) = (
             record.get("DEPLOYMENT_PATH"),
             map_to_u64(record.get("DEPLOYMENT_DEVICE")),
@@ -284,7 +284,7 @@ impl HistoryCtx {
     /// Creates a marker from an RPM-OSTree deploy message. Uses the `DEPLOYMENT_TIMESTAMP`
     /// in the message as the deploy time. This matches the history gv filename for that
     /// deployment. Returns None if record is incomplete.
-    fn deployment_record_to_marker(&self, record: &JournalRecord) -> Fallible> {
+    fn deployment_record_to_marker(&self, record: &JournalRecord) -> Result> {
         if let (Some(timestamp), Some(device), Some(inode), Some(path)) = (
             map_to_u64(record.get("DEPLOYMENT_TIMESTAMP")),
             map_to_u64(record.get("DEPLOYMENT_DEVICE")),
@@ -304,7 +304,7 @@ impl HistoryCtx {
     }

     /// Goes to the next OSTree boot msg in the journal and returns its marker.
-    fn find_next_boot_marker(&mut self) -> Fallible> {
+    fn find_next_boot_marker(&mut self) -> Result> {
         self.set_search_mode(JournalSearchMode::BootMsgs)?;
         while let Some(rec) = self.journal.previous_record()? {
             if let Some(Marker::Boot(m)) = self.boot_record_to_marker(&rec)? {
@@ -315,7 +315,7 @@ impl HistoryCtx {
     }

     /// Returns a marker of the appropriate kind for a given journal message.
-    fn record_to_marker(&self, record: &JournalRecord) -> Fallible> {
+    fn record_to_marker(&self, record: &JournalRecord) -> Result> {
         Ok(match record.get("MESSAGE_ID").unwrap() {
             m if m == OSTREE_BOOT_MSG => self.boot_record_to_marker(&record)?,
             m if m == RPMOSTREE_DEPLOY_MSG => self.deployment_record_to_marker(&record)?,
@@ -325,7 +325,7 @@ impl HistoryCtx {
     /// Goes to the next OSTree boot or RPM-OSTree deploy msg in the journal, creates a
     /// marker for it, and returns it.
-    fn find_next_marker(&mut self) -> Fallible> {
+    fn find_next_marker(&mut self) -> Result> {
         self.set_search_mode(JournalSearchMode::BootAndDeploymentMsgs)?;
         while let Some(rec) = self.journal.previous_record()? {
             if let Some(marker) = self.record_to_marker(&rec)? {
@@ -336,7 +336,7 @@ impl HistoryCtx {
     }

     /// Finds the matching deployment marker for the next boot marker in the queue.
-    fn scan_until_path_match(&mut self) -> Fallible> {
+    fn scan_until_path_match(&mut self) -> Result> {
         // keep popping & scanning until we get to the next boot marker
         let boot_marker = loop {
             match self.marker_queue.pop_front() {
@@ -376,7 +376,7 @@ impl HistoryCtx {
     /// Returns the next history entry, which consists of a boot timestamp and its matching
     /// deploy timestamp.
-    fn scan_until_next_entry(&mut self) -> Fallible> {
+    fn scan_until_next_entry(&mut self) -> Result> {
         while let Some((boot_marker, deployment_marker)) = self.scan_until_path_match()? {
             if boot_marker.node != deployment_marker.node {
                 // This is a non-foolproof safety valve to ensure that the boot is definitely
@@ -399,7 +399,7 @@ impl HistoryCtx {
     /// of the same deployment into a single entry. The `boot_count` field represents the
     /// number of boots squashed, and `*_boot_timestamp` fields provide the timestamp of the
     /// first and last boots.
-    fn scan_until_next_new_entry(&mut self) -> Fallible> {
+    fn scan_until_next_new_entry(&mut self) -> Result> {
         while let Some(entry) = self.scan_until_next_entry()? {
             if self.current_entry.is_none() {
                 /* first scan ever; prime with first entry */
@@ -426,7 +426,7 @@ impl HistoryCtx {
     /// Returns the next entry. This is a thin wrapper around `scan_until_next_new_entry`
     /// that mostly just handles the `Option` -> EOF conversion for the C side. Called from
     /// C through `ror_history_ctx_next()`.
-    fn next_entry(&mut self) -> Fallible {
+    fn next_entry(&mut self) -> Result {
         if self.reached_eof {
             bail!("next_entry() called after having reached EOF!")
         }
@@ -445,7 +445,7 @@ impl HistoryCtx {
 /// stuff in the host journal; in fact without needing any system journal access at all.
 #[cfg(test)]
 mod mock_journal {
-    use super::Fallible;
+    use super::Result;
     pub use systemd::journal::{JournalFiles, JournalRecord, JournalSeek};

     pub struct Journal {
@@ -455,25 +455,25 @@ mod mock_journal {
     }

     impl Journal {
-        pub fn open(_: JournalFiles, _: bool, _: bool) -> Fallible {
+        pub fn open(_: JournalFiles, _: bool, _: bool) -> Result {
             Ok(Journal {
                 entries: Vec::new(),
                 current_timestamp: None,
                 msg_ids: Vec::new(),
             })
         }
-        pub fn seek(&mut self, _: JournalSeek) -> Fallible<()> {
+        pub fn seek(&mut self, _: JournalSeek) -> Result<()> {
             Ok(())
         }
-        pub fn match_flush(&mut self) -> Fallible<()> {
+        pub fn match_flush(&mut self) -> Result<()> {
             self.msg_ids.clear();
             Ok(())
         }
-        pub fn match_add(&mut self, _: &str, msg_id: &str) -> Fallible<()> {
+        pub fn match_add(&mut self, _: &str, msg_id: &str) -> Result<()> {
             self.msg_ids.push(msg_id.into());
             Ok(())
         }
-        pub fn previous_record(&mut self) -> Fallible> {
+        pub fn previous_record(&mut self) -> Result> {
             while let Some((timestamp, record)) = self.entries.pop() {
                 if self.msg_ids.contains(record.get("MESSAGE_ID").unwrap()) {
                     self.current_timestamp = Some(timestamp);
@@ -483,7 +483,7 @@ mod mock_journal {
         }
         // This is only used by the prune path, which we're not unit testing.
-        pub fn next_record(&mut self) -> Fallible> {
+        pub fn next_record(&mut self) -> Result> {
             unimplemented!();
         }
     }
diff --git a/rust/src/journal.rs b/rust/src/journal.rs
index a5cf5dbc..a8885dce 100644
--- a/rust/src/journal.rs
+++ b/rust/src/journal.rs
@@ -4,7 +4,7 @@
  * SPDX-License-Identifier: Apache-2.0 OR MIT
  */

-use failure::Fallible;
+use anyhow::Result;
 use systemd::id128::Id128;
 use systemd::journal;

@@ -12,7 +12,7 @@ static OSTREE_FINALIZE_STAGED_SERVICE: &'static str = "ostree-finalize-staged.se
 static OSTREE_DEPLOYMENT_FINALIZING_MSG_ID: &'static str = "e8646cd63dff4625b77909a8e7a40994";
 static OSTREE_DEPLOYMENT_COMPLETE_MSG_ID: &'static str = "dd440e3e549083b63d0efc7dc15255f1";

-fn print_staging_failure_msg(msg: Option<&str>) -> Fallible<()> {
+fn print_staging_failure_msg(msg: Option<&str>) -> Result<()> {
     println!("Warning: failed to finalize previous deployment");
     if let Some(msg) = msg {
         println!("  {}", msg);
@@ -25,7 +25,7 @@ fn print_staging_failure_msg(msg: Option<&str>) -> Fallible<()> {
 }

 /// Look for a failure from ostree-finalized-stage.service in the journal of the previous boot.
-fn journal_print_staging_failure() -> Fallible<()> {
+fn journal_print_staging_failure() -> Result<()> {
     let mut j = journal::Journal::open(journal::JournalFiles::System, false, true)?;

     // first, go to the first entry of the current boot
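
For readers less familiar with the two crates: `failure::Fallible<T>` is an alias
for `Result<T, failure::Error>`, and `anyhow::Result<T>` is the equivalent alias
for `Result<T, anyhow::Error>`, which is why most of this patch is a mechanical
signature change. The error-context call does change shape, though: failure's
`with_context` closure receives the underlying error, while anyhow's
`context`/`with_context` only supply a message and chain the error automatically.
The sketch below shows the post-switch pattern; it is a standalone illustration
(the `read_release_file` helper and its path argument are invented for the
example, not code from this repository), assuming anyhow 1.x:

    use anyhow::{bail, Context, Result};

    // Illustrative helper, not part of the patch above.
    // Formerly this would have been: fn read_release_file(path: &str) -> Fallible<String>
    // with .with_context(|e| format!("reading {}: {}", path, e)).
    // With anyhow, only the message is supplied; the source error is attached for us.
    fn read_release_file(path: &str) -> Result<String> {
        let data = std::fs::read_to_string(path)
            .with_context(|| format!("reading {}", path))?; // lazy, formatted context
        if data.is_empty() {
            bail!("{} is empty", path); // early return with an ad-hoc error
        }
        Ok(data)
    }

A static message can use `.context("...")` directly, as the seal() call site in
coreos_rootfs.rs now does; `with_context` is only needed when the message has to
be built at runtime.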