diff --git a/src/api.rs b/src/api.rs
index f3c540cd..2296c3d7 100644
--- a/src/api.rs
+++ b/src/api.rs
@@ -2,6 +2,16 @@
 //!
 //! This API is both for interacting with an in-process store and for interacting
 //! with a remote store via rpc calls.
+//!
+//! The entry point for the API is the [`Store`] struct. There are several ways
+//! to obtain a `Store` instance: it is available via [`Deref`](std::ops::Deref)
+//! from the different store implementations
+//! (e.g. [`MemStore`](crate::store::mem::MemStore)
+//! and [`FsStore`](crate::store::fs::FsStore)) as well as on the
+//! [`BlobsProtocol`](crate::BlobsProtocol) iroh protocol handler.
+//!
+//! You can also [`connect`](Store::connect) to a remote store that is listening
+//! to rpc requests.
 use std::{io, net::SocketAddr, ops::Deref, sync::Arc};
 
 use bao_tree::io::EncodeError;
diff --git a/src/hashseq.rs b/src/hashseq.rs
index bcbebf98..98d96e45 100644
--- a/src/hashseq.rs
+++ b/src/hashseq.rs
@@ -1,8 +1,7 @@
 //! Helpers for blobs that contain a sequence of hashes.
-use std::{fmt::Debug, io};
+use std::fmt::Debug;
 
 use bytes::Bytes;
-use iroh_io::{AsyncSliceReader, AsyncSliceReaderExt};
 
 use crate::Hash;
 
@@ -51,34 +50,6 @@ impl IntoIterator for HashSeq {
     }
 }
 
-/// Stream over the hashes in a [`HashSeq`].
-///
-/// todo: make this wrap a reader instead of a [`HashSeq`].
-#[derive(Debug, Clone)]
-pub struct HashSeqStream(HashSeq);
-
-impl HashSeqStream {
-    /// Get the next hash in the sequence.
-    #[allow(clippy::should_implement_trait, clippy::unused_async)]
-    pub async fn next(&mut self) -> io::Result<Option<Hash>> {
-        Ok(self.0.pop_front())
-    }
-
-    /// Skip a number of hashes in the sequence.
-    #[allow(clippy::unused_async)]
-    pub async fn skip(&mut self, n: u64) -> io::Result<()> {
-        let ok = self.0.drop_front(n as usize);
-        if !ok {
-            Err(io::Error::new(
-                io::ErrorKind::UnexpectedEof,
-                "end of sequence",
-            ))
-        } else {
-            Ok(())
-        }
-    }
-}
-
 impl HashSeq {
     /// Create a new sequence of hashes.
     pub fn new(bytes: Bytes) -> Option<Self> {
@@ -89,16 +60,6 @@ impl HashSeq {
         }
     }
 
-    fn drop_front(&mut self, n: usize) -> bool {
-        let start = n * 32;
-        if start > self.0.len() {
-            false
-        } else {
-            self.0 = self.0.slice(start..);
-            true
-        }
-    }
-
     /// Iterate over the hashes in this sequence.
    pub fn iter(&self) -> impl Iterator<Item = Hash> + '_ {
         self.0.chunks_exact(32).map(|chunk| {
@@ -155,14 +116,3 @@ impl Iterator for HashSeqIter {
         self.0.pop_front()
     }
 }
-
-/// Parse a sequence of hashes.
-pub async fn parse_hash_seq<'a, R: AsyncSliceReader + 'a>(
-    mut reader: R,
-) -> anyhow::Result<(HashSeqStream, u64)> {
-    let bytes = reader.read_to_end().await?;
-    let hashes = HashSeq::try_from(bytes)?;
-    let num_hashes = hashes.len() as u64;
-    let stream = HashSeqStream(hashes);
-    Ok((stream, num_hashes))
-}
diff --git a/src/lib.rs b/src/lib.rs
index 66f5e75c..ed4f7850 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -40,11 +40,12 @@ pub use net_protocol::BlobsProtocol;
 pub mod protocol;
 pub mod provider;
 pub mod ticket;
-pub mod util;
+
+#[doc(hidden)]
+pub mod test;
+mod util;
 
 #[cfg(test)]
 mod tests;
 
-pub mod test;
-
 pub use protocol::ALPN;
diff --git a/src/protocol/range_spec.rs b/src/protocol/range_spec.rs
index c60414de..92cfe938 100644
--- a/src/protocol/range_spec.rs
+++ b/src/protocol/range_spec.rs
@@ -7,18 +7,19 @@
 //! collection.
 use std::{fmt, sync::OnceLock};
 
-use bao_tree::{ChunkNum, ChunkRanges, ChunkRangesRef};
+pub use bao_tree::ChunkRanges;
+use bao_tree::{ChunkNum, ChunkRangesRef};
 use serde::{Deserialize, Serialize};
 use smallvec::{smallvec, SmallVec};
 
+pub use crate::util::ChunkRangesExt;
+
 static CHUNK_RANGES_EMPTY: OnceLock<ChunkRanges> = OnceLock::new();
 
 fn chunk_ranges_empty() -> &'static ChunkRanges {
     CHUNK_RANGES_EMPTY.get_or_init(ChunkRanges::empty)
 }
 
-use crate::util::ChunkRangesExt;
-
 #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
 #[serde(from = "wire::RangeSpecSeq", into = "wire::RangeSpecSeq")]
 pub struct ChunkRangesSeq(pub(crate) SmallVec<[(u64, ChunkRanges); 2]>);
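Below is a minimal sketch of the `Store` entry point that the new `src/api.rs` module docs describe: a concrete store such as `MemStore` derefs to `api::Store`, so it can be passed wherever the generic API surface is expected, and `Store::connect` covers the remote case. The crate name, import paths, and the `MemStore::new()` constructor are assumptions for illustration only, not taken from this diff.

use iroh_blobs::{api::Store, store::mem::MemStore};

// Any handle that derefs to `Store` (MemStore, FsStore, BlobsProtocol) can be
// passed here through deref coercion.
fn use_store(store: &Store) {
    let _ = store;
}

fn main() {
    // Assumed constructor; an FsStore or a handle obtained via
    // `Store::connect(..)` to a remote store would be used the same way.
    let mem = MemStore::new();
    use_store(&mem); // &MemStore -> &Store via Deref
}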