Mirror of git://git.proxmox.com/git/pxar.git
add more code documentation
all but the `format` module are now #![deny(missing_docs)]

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
parent 5d4e59c52c
commit e5a2495ed3
@@ -96,6 +96,7 @@ impl<T: ReadAt> Accessor<T> {
         Ok(Directory::new(self.inner.open_root_ref().await?))
     }
 
+    /// Set a cache for the goodbye tables to reduce random disk access.
     pub fn set_goodbye_table_cache<C>(&mut self, cache: Option<C>)
     where
         C: Cache<u64, [GoodbyeItem]> + Send + Sync + 'static,
@@ -112,6 +113,7 @@ impl<T: ReadAt> Accessor<T> {
 }
 
 impl<T: Clone + ReadAt> Accessor<T> {
+    /// Open the "root" directory entry of this pxar archive.
     pub async fn open_root(&self) -> io::Result<Directory<T>> {
         Ok(Directory::new(self.inner.open_root().await?))
     }
@@ -233,6 +235,7 @@ impl<T: Clone + ReadAt> FileEntry<T> {
         self.inner.content_range()
     }
 
+    /// Get the file's contents.
     pub async fn contents(&self) -> io::Result<FileContents<T>> {
         Ok(FileContents {
             inner: self.inner.contents().await?,
@@ -242,11 +245,14 @@ impl<T: Clone + ReadAt> FileEntry<T> {
         })
     }
 
+    /// Convenience shortcut for when only the metadata contained in the [`Entry`] struct is of
+    /// interest.
     #[inline]
     pub fn into_entry(self) -> Entry {
         self.inner.into_entry()
     }
 
+    /// Access the contained [`Entry`] for metadata access.
     #[inline]
     pub fn entry(&self) -> &Entry {
         &self.inner.entry()
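As a usage sketch (not part of this patch, and assuming `FileEntry`, `Entry` and the `ReadAt` bound from this module are in scope), the two accessors documented here complement each other: borrow via `entry()` to peek, consume via `into_entry()` when only the metadata is kept.

```rust
// Hypothetical helper built only on the signatures shown above.
fn take_metadata<T: Clone + ReadAt>(file: FileEntry<T>) -> Entry {
    // `entry()` gives a cheap borrow for inspection; `into_entry()` consumes
    // the handle when nothing but the metadata is needed afterwards.
    file.into_entry()
}
```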
@@ -288,6 +294,7 @@ impl<'a, T: Clone + ReadAt> ReadDir<'a, T> {
         self.inner.count()
     }
 
+    /// Get the next directory entry.
     pub async fn next(&mut self) -> Option<io::Result<DirEntry<'a, T>>> {
         match self.inner.next().await {
             Ok(Some(inner)) => Some(Ok(DirEntry { inner })),
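A sketch of driving the documented `next()` method (not from this patch; it assumes a `ReadDir` obtained from an opened directory and the module's types in scope):

```rust
// Drain a directory listing; each item is an io::Result<DirEntry>, matching
// the signature documented above.
async fn count_entries<T: Clone + ReadAt>(mut dir: ReadDir<'_, T>) -> std::io::Result<usize> {
    let mut count = 0;
    while let Some(item) = dir.next().await {
        let _entry = item?;
        count += 1;
    }
    Ok(count)
}
```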
@@ -1,5 +1,7 @@
 //! Random access for PXAR files.
 
+#![deny(missing_docs)]
+
 use std::ffi::{OsStr, OsString};
 use std::future::Future;
 use std::io;
@@ -36,11 +38,14 @@ use cache::Cache;
 /// Range information used for unsafe raw random access:
 #[derive(Clone, Debug)]
 pub struct EntryRangeInfo {
+    /// Offset to the `FILENAME` header.
     pub filename_header_offset: Option<u64>,
+    /// Byte range spanning an entry in a pxar archive.
     pub entry_range: Range<u64>,
 }
 
 impl EntryRangeInfo {
+    /// Shortcut to create the "toplevel" range info without file name header offset.
     pub fn toplevel(entry_range: Range<u64>) -> Self {
         Self {
             filename_header_offset: None,
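Illustrative sketch (not part of the patch) of the documented shortcut; the archive length here is a stand-in value chosen by the caller:

```rust
// Build the range info for a whole archive: the root has no FILENAME header,
// hence the `toplevel` constructor documented above.
fn whole_archive_range(archive_len: u64) -> EntryRangeInfo {
    EntryRangeInfo::toplevel(0..archive_len)
}
```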
@@ -888,6 +893,8 @@ struct ReadResult {
     buffer: Vec<u8>,
 }
 
+/// A `SeqRead` adapter for a specific range inside another reader, with a temporary buffer due
+/// to lifetime constraints.
 #[doc(hidden)]
 pub struct SeqReadAtAdapter<T> {
     input: T,
@@ -910,6 +917,7 @@ impl<T> Drop for SeqReadAtAdapter<T> {
 }
 
 impl<T: ReadAt> SeqReadAtAdapter<T> {
+    /// Create a new `SeqRead` adapter given a range.
     pub fn new(input: T, range: Range<u64>) -> Self {
         if range.end < range.start {
             panic!("BAD SEQ READ AT ADAPTER");
@@ -1,3 +1,5 @@
+//! Async `ReadAt` trait.
+
 use std::any::Any;
 use std::future::Future;
 use std::io;
@@ -7,12 +9,33 @@ use std::task::{Context, Poll};
 
 /// Like Poll but Pending yields a value.
 pub enum MaybeReady<T, F> {
+    /// Same as [`Poll::Ready`].
     Ready(T),
 
+    /// Same as [`Poll::Pending`], but contains a "cookie" identifying the ongoing operation.
+    /// Without this value, it is impossible to make further progress on the operation.
     Pending(F),
 }
 
+/// Random access read implementation.
 pub trait ReadAt {
+    /// Begin a read operation.
+    ///
+    /// Contrary to tokio and future's `AsyncRead` traits, this implements positional reads and
+    /// therefore allows multiple operations to run simultaneously. In order to accomplish this,
+    /// the result of this call includes a [`ReadAtOperation`] "cookie" identifying the particular
+    /// read operation. This is necessary, since with an async runtime multiple such calls can come
+    /// from the same thread and even the same task.
+    ///
+    /// It is possible that this operation succeeds immediately, in which case
+    /// `MaybeReady::Ready(Ok(bytes))` is returned containing the number of bytes read.
+    ///
+    /// If the operation takes longer to complete, returns `MaybeReady::Pending(cookie)`, and the
+    /// current task will be notified via `cx.waker()` when progress can be made. Once that
+    /// happens, [`poll_complete`](ReadAt::poll_complete) should be called using the returned
+    /// `cookie`.
+    ///
+    /// On error, returns `MaybeReady::Ready(Err(err))`.
     fn start_read_at<'a>(
         self: Pin<&'a Self>,
         cx: &mut Context,
@@ -20,18 +43,38 @@ pub trait ReadAt {
         offset: u64,
     ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>>;
 
+    /// Attempt to complete a previously started read operation identified by the provided
+    /// [`ReadAtOperation`].
+    ///
+    /// If the read operation is finished, returns `MaybeReady::Ready(Ok(bytes))` containing the
+    /// number of bytes read.
+    ///
+    /// If the operation is not yet completed, returns `MaybeReady::Pending(cookie)`, returning the
+    /// (possibly modified) operation cookie again to be reused for the next call to
+    /// `poll_complete`.
+    ///
+    /// On error, returns `MaybeReady::Ready(Err(err))`.
     fn poll_complete<'a>(
         self: Pin<&'a Self>,
         op: ReadAtOperation<'a>,
     ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>>;
 }
 
+/// A "cookie" identifying a particular [`ReadAt`] operation.
 pub struct ReadAtOperation<'a> {
+    /// The implementor of the [`ReadAt`] trait is responsible for what type of data is contained
+    /// in here.
+    ///
+    /// Note that the contained data needs to implement `Drop` so that dropping the "cookie"
+    /// cancels the operation correctly.
+    ///
+    /// Apart from this field, the struct only contains phantom data.
     pub cookie: Box<dyn Any + Send + Sync>,
     _marker: PhantomData<&'a mut [u8]>,
 }
 
 impl<'a> ReadAtOperation<'a> {
+    /// Create a new [`ReadAtOperation`].
     pub fn new<T: Into<Box<dyn Any + Send + Sync>>>(cookie: T) -> Self {
         Self {
             cookie: cookie.into(),
@@ -42,7 +85,9 @@ impl<'a> ReadAtOperation<'a> {
 
 // awaitable helper:
 
+/// [`ReadAt`] extension trait, akin to `AsyncReadExt`.
 pub trait ReadAtExt: ReadAt {
+    /// Equivalent to `async fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize>`.
     fn read_at<'a>(&'a self, buf: &'a mut [u8], offset: u64) -> ReadAtImpl<'a, Self>
     where
         Self: Sized,
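A usage sketch for the extension method documented above (not from this patch; only the `read_at` signature is taken from this file, the retry loop and buffer size are illustrative):

```rust
// Fill a fixed-size buffer with positional reads. Short reads are allowed, so
// loop until the buffer is full; a zero-byte read is treated as end-of-file.
async fn read_exact_at<R: ReadAtExt>(reader: &R, offset: u64) -> std::io::Result<[u8; 64]> {
    let mut buf = [0u8; 64];
    let mut done = 0;
    while done < buf.len() {
        let got = reader.read_at(&mut buf[done..], offset + done as u64).await?;
        if got == 0 {
            return Err(std::io::ErrorKind::UnexpectedEof.into());
        }
        done += got;
    }
    Ok(buf)
}
```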
@@ -87,6 +87,7 @@ impl<T: ReadAt> Accessor<T> {
         )?))
     }
 
+    /// Set a cache for the goodbye tables to reduce random disk access.
     pub fn set_goodbye_table_cache<C>(&mut self, cache: Option<C>)
     where
         C: Cache<u64, [GoodbyeItem]> + Send + Sync + 'static,
@@ -103,6 +104,7 @@ impl<T: ReadAt> Accessor<T> {
 }
 
 impl<T: Clone + ReadAt> Accessor<T> {
+    /// Open the "root" directory entry of this pxar archive.
     pub fn open_root(&self) -> io::Result<Directory<T>> {
         Ok(Directory::new(poll_result_once(self.inner.open_root())?))
     }
@@ -155,13 +157,14 @@ impl<T: Clone + ReadAt> Accessor<T> {
     }
 }
 
-/// Adapter for FileExt readers.
+/// Adapter for `FileExt` readers to make it usable via the `ReadAt` trait.
 #[derive(Clone)]
 pub struct FileReader<T> {
     inner: T,
 }
 
 impl<T: FileExt> FileReader<T> {
+    /// Wrap a regular reader to access it via the `ReadAt` trait.
     pub fn new(inner: T) -> Self {
         Self { inner }
     }
@@ -185,13 +188,14 @@ impl<T: FileExt> ReadAt for FileReader<T> {
     }
 }
 
-/// Adapter for `Arc` or `Rc` to FileExt readers.
+/// Adapter for `Arc` or `Rc` to `FileExt` readers to make it usable via the `ReadAt` trait.
 #[derive(Clone)]
 pub struct FileRefReader<T: Clone> {
     inner: T,
 }
 
 impl<T: Clone> FileRefReader<T> {
+    /// Wrap a reference to a `FileExt` reader.
     pub fn new(inner: T) -> Self {
         Self { inner }
     }
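Usage sketch for the two adapters documented above (not part of the patch; it assumes the `FileExt` bound used here is `std::os::unix::fs::FileExt`, which `std::fs::File` implements):

```rust
use std::fs;
use std::sync::Arc;

// Wrap a file directly, and wrap a shared handle behind an Arc for the
// reference-counted variant.
fn open_adapters(path: &str) -> std::io::Result<(FileReader<fs::File>, FileRefReader<Arc<fs::File>>)> {
    let plain = fs::File::open(path)?;
    let shared = Arc::new(fs::File::open(path)?);
    Ok((FileReader::new(plain), FileRefReader::new(shared)))
}
```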
@@ -291,6 +295,7 @@ impl<T: Clone + ReadAt> FileEntry<T> {
         self.inner.content_range()
     }
 
+    /// Get the file's contents.
     pub fn contents(&self) -> io::Result<FileContents<T>> {
         Ok(FileContents {
             inner: poll_result_once(self.inner.contents())?,
@@ -298,11 +303,14 @@ impl<T: Clone + ReadAt> FileEntry<T> {
         })
     }
 
+    /// Convenience shortcut for when only the metadata contained in the [`Entry`] struct is of
+    /// interest.
     #[inline]
     pub fn into_entry(self) -> Entry {
         self.inner.into_entry()
     }
 
+    /// Access the contained [`Entry`] for metadata access.
     #[inline]
     pub fn entry(&self) -> &Entry {
         &self.inner.entry()
@@ -27,6 +27,8 @@
 //! Heap](https://en.wikipedia.org/wiki/Binary_heap) gives a short
 //! intro howto store binary trees using an array.
 
+#![deny(missing_docs)]
+
 use std::cmp::Ordering;
 
 #[allow(clippy::many_single_char_names)]
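Not from the patch: the array layout referenced by the module doc places a node's children at fixed positions computed from its own index, for example:

```rust
// 0-based binary-heap style indexing, as in the linked Wikipedia article:
// node i has its children at 2*i + 1 and 2*i + 2 and its parent at (i - 1) / 2.
fn children(i: usize) -> (usize, usize) {
    (2 * i + 1, 2 * i + 2)
}

fn parent(i: usize) -> Option<usize> {
    if i == 0 { None } else { Some((i - 1) / 2) }
}
```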
@@ -2,6 +2,8 @@
 //!
 //! This is the implementation used by both the synchronous and async pxar wrappers.
 
+#![deny(missing_docs)]
+
 use std::convert::TryFrom;
 use std::ffi::OsString;
 use std::io;
@@ -651,6 +653,7 @@ impl<I: SeqRead> DecoderImpl<I> {
     }
 }
 
+/// Reader for file contents inside a pxar archive.
 pub struct Contents<'a, T: SeqRead> {
     input: &'a mut T,
     at: &'a mut u64,
@@ -658,7 +661,7 @@ pub struct Contents<'a, T: SeqRead> {
 }
 
 impl<'a, T: SeqRead> Contents<'a, T> {
-    pub fn new(input: &'a mut T, at: &'a mut u64, len: u64) -> Self {
+    fn new(input: &'a mut T, at: &'a mut u64, len: u64) -> Self {
         Self { input, at, len }
     }
 
@@ -29,6 +29,7 @@ impl<T: io::Read> Decoder<StandardReader<T>> {
         Decoder::new(StandardReader::new(input))
     }
 
+    /// Get a direct reference to the reader contained inside the [`StandardReader`].
     pub fn input(&mut self) -> &T {
         self.inner.input().inner()
     }
@@ -96,10 +97,12 @@ pub struct StandardReader<T> {
 }
 
 impl<T: io::Read> StandardReader<T> {
+    /// Make a new [`StandardReader`].
     pub fn new(inner: T) -> Self {
         Self { inner }
     }
 
+    /// Get an immutable reference to the contained reader.
     pub fn inner(&self) -> &T {
         &self.inner
     }
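Usage sketch (not part of the patch): any blocking `io::Read` source can be wrapped; a byte slice serves as a stand-in input here:

```rust
// `&[u8]` implements `io::Read`, which is all `StandardReader::new` requires.
fn reader_over_slice(data: &[u8]) -> StandardReader<&[u8]> {
    StandardReader::new(data)
}
```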
@@ -115,6 +118,7 @@ impl<T: io::Read> SeqRead for StandardReader<T> {
     }
 }
 
+/// Reader for file contents inside a pxar archive.
 pub struct Contents<'a, T: SeqRead> {
     inner: decoder::Contents<'a, T>,
 }
@@ -167,6 +167,11 @@ impl<'a, T: SeqWrite + 'a> Encoder<'a, T> {
     }
 }
 
+/// This is a "file" inside a pxar archive, to which the initially declared amount of data should
+/// be written.
+///
+/// Writing more or less than the designated amount is an error and will cause the produced archive
+/// to be broken.
 #[repr(transparent)]
 pub struct File<'a, S: SeqWrite> {
     inner: encoder::FileImpl<'a, S>,
@@ -213,11 +218,14 @@ mod tokio_writer {
 
     use crate::encoder::SeqWrite;
 
+    /// Pxar encoder write adapter for [`AsyncWrite`](tokio::io::AsyncWrite).
     pub struct TokioWriter<T> {
         inner: Option<T>,
     }
 
     impl<T: tokio::io::AsyncWrite> TokioWriter<T> {
+        /// Make a new [`SeqWrite`] wrapper for an object implementing
+        /// [`AsyncWrite`](tokio::io::AsyncWrite).
         pub fn new(inner: T) -> Self {
             Self { inner: Some(inner) }
         }
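Usage sketch (not part of the patch; it assumes the tokio-backed writer is available in the build): wrapping an `AsyncWrite` sink, here `tokio::io::sink()` as a stand-in that discards its input:

```rust
// `tokio::io::Sink` implements `AsyncWrite`, satisfying `TokioWriter::new`.
fn discarding_writer() -> TokioWriter<tokio::io::Sink> {
    TokioWriter::new(tokio::io::sink())
}
```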
@@ -2,6 +2,8 @@
 //!
 //! This is the implementation used by both the synchronous and async pxar wrappers.
 
+#![deny(missing_docs)]
+
 use std::io;
 use std::mem::{forget, size_of, size_of_val, take};
 use std::os::unix::ffi::OsStrExt;
@@ -29,6 +31,7 @@ pub use sync::Encoder;
 pub struct LinkOffset(u64);
 
 impl LinkOffset {
+    /// Get the raw byte offset of this link.
     #[inline]
     pub fn raw(self) -> u64 {
         self.0
@@ -41,12 +44,23 @@ impl LinkOffset {
 /// synchronous wrapper and for both `tokio` and `future` `AsyncWrite` types in the asynchronous
 /// wrapper.
 pub trait SeqWrite {
+    /// Attempt to perform a sequential write to the file. On success, the number of written bytes
+    /// is returned as `Poll::Ready(Ok(bytes))`.
+    ///
+    /// If writing is not yet possible, `Poll::Pending` is returned and the current task will be
+    /// notified via the `cx.waker()` when writing becomes possible.
     fn poll_seq_write(
         self: Pin<&mut Self>,
         cx: &mut Context,
         buf: &[u8],
     ) -> Poll<io::Result<usize>>;
 
+    /// Attempt to flush the output, ensuring that all buffered data reaches the destination.
+    ///
+    /// On success, returns `Poll::Ready(Ok(()))`.
+    ///
+    /// If flushing cannot complete immediately, `Poll::Pending` is returned and the current task
+    /// will be notified via `cx.waker()` when progress can be made.
     fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>>;
 }
 
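A sketch of the contract spelled out above (not part of the patch; it assumes `SeqWrite` is in scope): an in-memory sink can answer both methods immediately:

```rust
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

// Toy sink: memory writes never block, so both methods complete right away.
struct VecWriter(Vec<u8>);

impl SeqWrite for VecWriter {
    fn poll_seq_write(
        self: Pin<&mut Self>,
        _cx: &mut Context,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        // Accept the whole buffer and report how many bytes were written.
        self.get_mut().0.extend_from_slice(buf);
        Poll::Ready(Ok(buf.len()))
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<io::Result<()>> {
        // Nothing is buffered outside the Vec itself.
        Poll::Ready(Ok(()))
    }
}
```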
@@ -790,7 +804,7 @@ impl<'a, T: SeqWrite + 'a> EncoderImpl<'a, T> {
 }
 
 /// Writer for a file object in a directory.
-pub struct FileImpl<'a, S: SeqWrite> {
+pub(crate) struct FileImpl<'a, S: SeqWrite> {
     output: &'a mut S,
 
     /// This file's `GoodbyeItem`. FIXME: We currently don't touch this, can we just push it
@@ -165,6 +165,11 @@ impl<'a, T: SeqWrite + 'a> Encoder<'a, T> {
     }
 }
 
+/// This is a "file" inside a pxar archive, to which the initially declared amount of data should
+/// be written.
+///
+/// Writing more or less than the designated amount is an error and will cause the produced archive
+/// to be broken.
 #[repr(transparent)]
 pub struct File<'a, S: SeqWrite> {
     inner: encoder::FileImpl<'a, S>,
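Usage sketch (not part of the patch): the `io::Write` impl visible in the next hunk means the standard write helpers apply, and per the doc above exactly the declared number of bytes must be written:

```rust
use std::io::Write;

// Assumed context: `file` was created by the encoder with a declared size
// equal to `data.len()`; writing any other amount would break the archive.
fn fill_file<S: SeqWrite>(mut file: File<'_, S>, data: &[u8]) -> std::io::Result<()> {
    file.write_all(data)
}
```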
@@ -187,12 +192,13 @@ impl<'a, S: SeqWrite> io::Write for File<'a, S> {
     }
 }
 
-/// Pxar encoder write adapter for `std::io::Write`.
+/// Pxar encoder write adapter for [`Write`](std::io::Write).
 pub struct StandardWriter<T> {
     inner: Option<T>,
 }
 
 impl<T: io::Write> StandardWriter<T> {
+    /// Make a new [`SeqWrite`] wrapper for an object implementing [`Write`](std::io::Write).
     pub fn new(inner: T) -> Self {
         Self { inner: Some(inner) }
     }
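Usage sketch (not part of the patch): any blocking `io::Write` sink can be wrapped, here a `Vec<u8>` as an in-memory stand-in:

```rust
// `Vec<u8>` implements `io::Write`, which is all `StandardWriter::new` requires.
fn vec_backed_writer() -> StandardWriter<Vec<u8>> {
    StandardWriter::new(Vec::new())
}
```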
@@ -692,6 +692,8 @@ pub struct QuotaProjectId {
     pub projid: u64,
 }
 
+/// An entry in the "goodbye table" in a pxar archive. This is required for random access inside
+/// pxar archives.
 #[derive(Clone, Debug, Endian)]
 #[repr(C)]
 pub struct GoodbyeItem {
@@ -712,12 +714,15 @@ pub struct GoodbyeItem {
 }
 
 impl GoodbyeItem {
+    /// Create a new [`GoodbyeItem`] by hashing the name, and storing the hash along with the
+    /// offset and size information.
     pub fn new(name: &[u8], offset: u64, size: u64) -> Self {
         let hash = hash_filename(name);
         Self { hash, offset, size }
     }
 }
 
+/// Hash a file name for use in the goodbye table.
 pub fn hash_filename(name: &[u8]) -> u64 {
     use std::hash::Hasher;
 
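Illustrative sketch (not from the patch) of the relationship documented above: the same hash is used when building the table and when looking a name up, so both sides agree on the key:

```rust
// Build a goodbye-table entry and the hash key used to find it again later.
fn goodbye_entry(name: &str, offset: u64, size: u64) -> (u64, GoodbyeItem) {
    let key = hash_filename(name.as_bytes());
    (key, GoodbyeItem::new(name.as_bytes(), offset, size))
}
```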
@@ -726,6 +731,8 @@ pub fn hash_filename(name: &[u8]) -> u64 {
     hasher.finish()
 }
 
+/// Returns `true` if the path consists only of [`Normal`](std::path::Component::Normal)
+/// components.
 pub fn path_is_legal_component(path: &Path) -> bool {
     let mut components = path.components();
     match components.next() {
@@ -735,6 +742,10 @@ pub fn path_is_legal_component(path: &Path) -> bool {
     components.next().is_none()
 }
 
+/// Assert that the path consists only of [`Normal`](std::path::Component::Normal) components.
+///
+/// Returns an [`io::Error`](std::io::Error) of type [`Other`](std::io::ErrorKind::Other) if that's
+/// not the case.
 pub fn check_file_name(path: &Path) -> io::Result<()> {
     if !path_is_legal_component(path) {
         io_bail!("invalid file name in archive: {:?}", path);
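Illustrative sketch (not from the patch): a single `Normal` path component passes, anything containing separators or parent references is rejected:

```rust
use std::path::Path;

// `check_file_name` turns the boolean check into an io::Error, as documented.
fn validate_names(names: &[&str]) -> std::io::Result<()> {
    for name in names {
        check_file_name(Path::new(name))?;
    }
    Ok(())
}
```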
src/lib.rs
@@ -152,11 +152,13 @@ impl From<MetadataBuilder> for Metadata {
     }
 }
 
+/// A builder for the file [`Metadata`] stored in pxar archives.
 pub struct MetadataBuilder {
     inner: Metadata,
 }
 
 impl MetadataBuilder {
+    /// Create a new [`MetadataBuilder`] given an initial type/mode bitset.
     pub const fn new(type_and_mode: u64) -> Self {
         Self {
             inner: Metadata {
@@ -182,6 +184,7 @@ impl MetadataBuilder {
         }
     }
 
+    /// Build the [`Metadata`].
     pub fn build(self) -> Metadata {
         self.inner
     }
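Usage sketch (not part of the patch): the minimal builder round-trip visible in this hunk; the `type_and_mode` bits are supplied by the caller, no particular constants are assumed:

```rust
// Create metadata from a raw type/mode bitset and immediately build it.
fn metadata_from_bits(type_and_mode: u64) -> Metadata {
    MetadataBuilder::new(type_and_mode).build()
}
```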
@@ -323,6 +326,7 @@ pub struct Acl {
 }
 
 impl Acl {
+    /// Shortcut to check if all fields of this [`Acl`] entry are empty.
     pub fn is_empty(&self) -> bool {
         self.users.is_empty()
             && self.groups.is_empty()
@@ -354,7 +358,13 @@ pub enum EntryKind {
     Fifo,
 
     /// Regular file.
-    File { size: u64, offset: Option<u64> },
+    File {
+        /// The file size in bytes.
+        size: u64,
+
+        /// The file's byte offset inside the archive, if available.
+        offset: Option<u64>,
+    },
 
     /// Directory entry. When iterating through an archive, the contents follow next.
     Directory,