diff --git a/firewood/benches/hashops.rs b/firewood/benches/hashops.rs
index a6ee313cb..d875db632 100644
--- a/firewood/benches/hashops.rs
+++ b/firewood/benches/hashops.rs
@@ -4,7 +4,9 @@
 // hash benchmarks; run with 'cargo bench'
 use criterion::{criterion_group, criterion_main, profiler::Profiler, BatchSize, Criterion};
+use firewood::db::{BatchOp, DbConfig};
 use firewood::merkle::Merkle;
+use firewood::v2::api::{Db as _, Proposal as _};
 use pprof::ProfilerGuard;
 use rand::{distributions::Alphanumeric, rngs::StdRng, Rng, SeedableRng};
 use std::sync::Arc;
@@ -52,23 +54,6 @@ impl Profiler for FlamegraphProfiler {
     }
 }
 
-// TODO danlaine use or remove
-// fn bench_trie_hash(criterion: &mut Criterion) {
-//     let mut to = [1u8; TRIE_HASH_LEN];
-//     let mut store = InMemLinearStore::new(TRIE_HASH_LEN as u64, 0u8);
-//     store.write(0, &*ZERO_HASH).expect("write should succeed");
-
-//     #[allow(clippy::unwrap_used)]
-//     criterion
-//         .benchmark_group("TrieHash")
-//         .bench_function("dehydrate", |b| {
-//             b.iter(|| ZERO_HASH.serialize(&mut to).unwrap());
-//         })
-//         .bench_function("hydrate", |b| {
-//             b.iter(|| TrieHash::deserialize(0, &store).unwrap());
-//         });
-// }
-
 // This benchmark peeks into the merkle layer and times how long it takes
 // to insert NKEYS with a key length of KEYSIZE
 #[allow(clippy::unwrap_used)]
@@ -107,57 +92,53 @@ fn bench_merkle<const NKEYS: usize, const KEYSIZE: usize>(criterion: &mut Criter
         });
 }
 
-// This bechmark does the same thing as bench_merkle except it uses the revision manager
-// TODO: Enable again once the revision manager is stable
-// fn _bench_db<const N: usize>(criterion: &mut Criterion) {
-//     const KEY_LEN: usize = 4;
-//     let mut rng = StdRng::seed_from_u64(1234);
-
-//     #[allow(clippy::unwrap_used)]
-//     criterion
-//         .benchmark_group("Db")
-//         .sample_size(30)
-//         .bench_function("commit", |b| {
-//             b.to_async(tokio::runtime::Runtime::new().unwrap())
-//                 .iter_batched(
-//                     || {
-//                         let batch_ops: Vec<_> = repeat_with(|| {
-//                             (&mut rng)
-//                                 .sample_iter(&Alphanumeric)
-//                                 .take(KEY_LEN)
-//                                 .collect()
-//                         })
-//                         .map(|key: Vec<_>| BatchOp::Put {
-//                             key,
-//                             value: vec![b'v'],
-//                         })
-//                         .take(N)
-//                         .collect();
-//                         batch_ops
-//                     },
-//                     |batch_ops| async {
-//                         let db_path = std::env::temp_dir();
-//                         let db_path = db_path.join("benchmark_db");
-//                         let cfg = DbConfig::builder();
-
-//                         #[allow(clippy::unwrap_used)]
-//                         let db = firewood::db::Db::new(db_path, cfg.clone().truncate(true).build())
-//                             .await
-//                             .unwrap();
-
-//                         #[allow(clippy::unwrap_used)]
-//                         db.propose(batch_ops).await.unwrap().commit().await.unwrap()
-//                     },
-//                     BatchSize::SmallInput,
-//                 );
-//         });
-// }
+#[allow(clippy::unwrap_used)]
+fn bench_db<const N: usize>(criterion: &mut Criterion) {
+    const KEY_LEN: usize = 4;
+    let mut rng = StdRng::seed_from_u64(1234);
+
+    criterion
+        .benchmark_group("Db")
+        .sample_size(30)
+        .bench_function("commit", |b| {
+            b.to_async(tokio::runtime::Runtime::new().unwrap())
+                .iter_batched(
+                    || {
+                        let batch_ops: Vec<_> = repeat_with(|| {
+                            (&mut rng)
+                                .sample_iter(&Alphanumeric)
+                                .take(KEY_LEN)
+                                .collect()
+                        })
+                        .map(|key: Vec<_>| BatchOp::Put {
+                            key,
+                            value: vec![b'v'],
+                        })
+                        .take(N)
+                        .collect();
+                        batch_ops
+                    },
+                    |batch_ops| async {
+                        let db_path = std::env::temp_dir();
+                        let db_path = db_path.join("benchmark_db");
+                        let cfg = DbConfig::builder();
+
+                        let db = firewood::db::Db::new(db_path, cfg.clone().truncate(true).build())
+                            .await
+                            .unwrap();
+
+                        db.propose(batch_ops).await.unwrap().commit().await.unwrap()
+                    },
+                    BatchSize::SmallInput,
+                );
+        });
+}
 
 criterion_group! {
     name = benches;
     config = Criterion::default().with_profiler(FlamegraphProfiler::Init(100));
     // targets = bench_trie_hash, bench_merkle::<3, 32>, bench_db::<100>
-    targets = bench_merkle::<3, 4>, bench_merkle<3, 32>
+    targets = bench_merkle::<3, 4>, bench_merkle<3, 32>, bench_db<100>
 }
 
 criterion_main!(benches);
diff --git a/firewood/examples/insert.rs b/firewood/examples/insert.rs
index dfd86e79b..f9f32f2f9 100644
--- a/firewood/examples/insert.rs
+++ b/firewood/examples/insert.rs
@@ -49,7 +49,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
 
     let args = Args::parse();
 
-    let mut db = Db::new("rev_db", cfg)
+    let db = Db::new("rev_db", cfg)
         .await
         .expect("db initiation should succeed");
diff --git a/firewood/src/db.rs b/firewood/src/db.rs
index b9a422619..23903b96f 100644
--- a/firewood/src/db.rs
+++ b/firewood/src/db.rs
@@ -19,9 +19,6 @@ use std::sync::{Arc, RwLock};
 use storage::{Committed, FileBacked, HashedNodeReader, ImmutableProposal, NodeStore, TrieHash};
 use typed_builder::TypedBuilder;
 
-// TODO use or remove
-const _VERSION_STR: &[u8; 16] = b"firewood v0.1\0\0\0";
-
 #[derive(Debug)]
 #[non_exhaustive]
 pub enum DbError {
@@ -69,9 +66,10 @@ impl api::DbView for HistoricalRev {
 
     async fn single_key_proof<K: KeyType>(
         &self,
-        _key: K,
-    ) -> Result<Option<Proof<ProofNode>>, api::Error> {
-        todo!()
+        key: K,
+    ) -> Result<Proof<ProofNode>, api::Error> {
+        let merkle = Merkle::from(self);
+        merkle.prove(key.as_ref()).map_err(api::Error::from)
     }
 
     async fn range_proof<K: KeyType, V>(
@@ -91,110 +89,6 @@ impl api::DbView for HistoricalRev {
     }
 }
 
-// impl<T> HistoricalRev<T> {
-//     pub fn stream(&self) -> MerkleKeyValueStream<'_, T> {
-//         todo!()
-//     }
-
-//     pub fn stream_from(&self, _start_key: &[u8]) -> MerkleKeyValueStream<'_, T> {
-//         todo!()
-//     }
-
-//     /// Get root hash of the generic key-value storage.
-//     pub fn kv_root_hash(&self) -> Result<TrieHash, DbError> {
-//         todo!()
-//     }
-
-//     /// Get a value associated with a key.
-//     pub fn get(&self, _key: &[u8]) -> Option<Vec<u8>> {
-//         todo!()
-//     }
-
-//     /// Dump the Trie of the generic key-value storage.
-//     pub fn dump(&self, _w: &mut dyn Write) -> Result<(), DbError> {
-//         todo!()
-//     }
-
-//     pub fn prove(&self, _key: &[u8]) -> Result<Proof<ProofNode>, MerkleError> {
-//         todo!()
-//     }
-
-//     /// Verifies a range proof is valid for a set of keys.
-//     pub fn verify_range_proof<V: AsRef<[u8]>>(
-//         &self,
-//         _proof: &Proof<V>,
-//         _first_key: &[u8],
-//         _last_key: &[u8],
-//         _keys: Vec<&[u8]>,
-//         _values: Vec<V>,
-//     ) -> Result<bool, ProofError> {
-//         todo!()
-//     }
-// }
-
-/// TODO danlaine: implement
-// pub struct Proposal<T> {
-//     _proposal: T,
-// }
-
-// #[async_trait]
-// impl<T> api::Proposal for Proposal<T> {
-//     type Proposal = Proposal<T>;
-
-//     async fn commit(self: Arc<Self>) -> Result<(), api::Error> {
-//         todo!()
-//     }
-
-//     async fn propose<K: KeyType, V: ValueType>(
-//         self: Arc<Self>,
-//         _data: api::Batch<K, V>,
-//     ) -> Result<Arc<Self::Proposal>, api::Error> {
-//         todo!()
-//     }
-// }
-
-// #[async_trait]
-// impl<T> api::DbView for Proposal<T> {
-//     type Stream<'a> = MerkleKeyValueStream<'a, T> where T: 'a;
-
-//     async fn root_hash(&self) -> Result<HashKey, api::Error> {
-//         todo!()
-//     }
-
-//     async fn val<K>(&self, _key: K) -> Result<Option<Vec<u8>>, api::Error>
-//     where
-//         K: api::KeyType,
-//     {
-//         todo!()
-//     }
-
-//     async fn single_key_proof<K>(&self, _key: K) -> Result<Option<Proof<ProofNode>>, api::Error>
-//     where
-//         K: api::KeyType,
-//     {
-//         todo!()
-//     }
-
-//     async fn range_proof<K>(
-//         _first_key: Option<K>,
-//         _last_key: Option<K>,
-//         _limit: Option<usize>,
-//     ) -> Result<Option<api::RangeProof<Vec<u8>, Vec<u8>, ProofNode>>, api::Error>
-//     where
-//         K: api::KeyType,
-//     {
-//         todo!();
-//     }
-
-//     fn iter_option<K: KeyType>(
-//         &self,
-//         _first_key: Option<K>,
-//     ) -> Result<Self::Stream<'_>, api::Error> {
-//         todo!()
-//     }
-// }
-
 /// Database configuration.
 #[derive(Clone, TypedBuilder, Debug)]
 pub struct DbConfig {
@@ -238,7 +132,7 @@ where
     }
 
     async fn propose<'p, K: KeyType, V: ValueType>(
-        &'p mut self,
+        &'p self,
         batch: api::Batch<K, V>,
     ) -> Result<Arc<Self::Proposal<'p>>, api::Error>
     where
@@ -293,17 +187,17 @@ impl Db {
         Ok(db)
     }
 
-    /// Create a proposal.
-    // pub fn new_proposal<K: KeyType, V: ValueType>(
-    //     &self,
-    //     _data: Batch<K, V>,
-    // ) -> Result<Proposal<'_>, DbError> {
-    //     todo!()
-    // }
-
     /// Dump the Trie of the latest revision.
-    pub fn dump(&self, _w: &mut dyn Write) -> Result<(), DbError> {
-        todo!()
+    pub fn dump(&self, w: &mut dyn Write) -> Result<(), DbError> {
+        let latest_rev_nodestore = self
+            .manager
+            .read()
+            .expect("poisoned lock")
+            .current_revision();
+        let merkle = Merkle::from(latest_rev_nodestore);
+        // TODO: This should be a stream
+        let output = merkle.dump().map_err(DbError::Merkle)?;
+        write!(w, "{}", output).map_err(DbError::IO)
     }
 
     pub fn metrics(&self) -> Arc<DbMetrics> {
@@ -322,18 +216,17 @@ impl<'a> api::DbView for Proposal<'a> {
     type Stream<'b> = MerkleKeyValueStream<'b, NodeStore<Arc<ImmutableProposal>, FileBacked>> where Self: 'b;
 
     async fn root_hash(&self) -> Result<Option<TrieHash>, api::Error> {
-        todo!()
+        self.nodestore.root_hash().map_err(api::Error::from)
     }
 
-    async fn val<K: KeyType>(&self, _key: K) -> Result<Option<Box<[u8]>>, api::Error> {
-        todo!()
+    async fn val<K: KeyType>(&self, key: K) -> Result<Option<Box<[u8]>>, api::Error> {
+        let merkle = Merkle::from(self.nodestore.clone());
+        merkle.get_value(key.as_ref()).map_err(api::Error::from)
     }
 
-    async fn single_key_proof<K: KeyType>(
-        &self,
-        _key: K,
-    ) -> Result<Option<Proof<ProofNode>>, api::Error> {
-        todo!()
+    async fn single_key_proof<K: KeyType>(&self, key: K) -> Result<Proof<ProofNode>, api::Error> {
+        let merkle = Merkle::from(self.nodestore.clone());
+        merkle.prove(key.as_ref()).map_err(api::Error::from)
     }
 
     async fn range_proof<K: KeyType, V>(
@@ -359,9 +252,35 @@ impl<'a> api::Proposal for Proposal<'a> {
 
     async fn propose<K: KeyType, V: ValueType>(
         self: Arc<Self>,
-        _data: api::Batch<K, V>,
+        batch: api::Batch<K, V>,
     ) -> Result<Arc<Self::Proposal>, api::Error> {
-        todo!()
+        let parent = self.nodestore.clone();
+        let proposal = NodeStore::new(parent)?;
+        let mut merkle = Merkle::from(proposal);
+        for op in batch {
+            match op {
+                BatchOp::Put { key, value } => {
+                    merkle.insert(key.as_ref(), value.as_ref().into())?;
+                }
+                BatchOp::Delete { key } => {
+                    merkle.remove(key.as_ref())?;
+                }
+            }
+        }
+        let nodestore = merkle.into_inner();
+        let immutable: Arc<NodeStore<Arc<ImmutableProposal>, FileBacked>> =
+            Arc::new(nodestore.into());
+        self.db
+            .manager
+            .write()
+            .expect("poisoned lock")
+            .add_proposal(immutable.clone());
+
+        Ok(Self::Proposal {
+            nodestore: immutable,
+            db: self.db,
+        }
+        .into())
     }
 
     async fn commit(self: Arc<Self>) -> Result<(), api::Error> {
@@ -391,7 +310,7 @@ mod test {
 
     #[tokio::test]
     async fn test_cloned_proposal_error() {
-        let mut db = testdb().await;
+        let db = testdb().await;
         let proposal = db
             .propose::<Vec<u8>, Vec<u8>>(Default::default())
             .await
diff --git a/firewood/src/manager.rs b/firewood/src/manager.rs
index 9daadede6..958b3a6fa 100644
--- a/firewood/src/manager.rs
+++ b/firewood/src/manager.rs
@@ -42,7 +42,6 @@ pub(crate) struct RevisionManager {
     proposals: Vec<ProposedRevision>,
     // committing_proposals: VecDeque<Arc<ProposalNodeStore>>,
     by_hash: HashMap<TrieHash, CommittedRevision>,
-    // TODO: maintain root hash of the most recent commit
 }
 
 #[derive(Debug, thiserror::Error)]
@@ -148,8 +147,6 @@ impl RevisionManager {
     }
 }
 
-pub type NewProposalError = (); // TODO implement
-
 impl RevisionManager {
     pub fn add_proposal(&mut self, proposal: ProposedRevision) {
         self.proposals.push(proposal);
diff --git a/firewood/src/v2/api.rs b/firewood/src/v2/api.rs
index 95f7c2f88..7f3980d6d 100644
--- a/firewood/src/v2/api.rs
+++ b/firewood/src/v2/api.rs
@@ -96,19 +96,27 @@ pub enum Error {
     #[error("request RangeProof for empty trie")]
     RangeProofOnEmptyTrie,
-}
 
-impl From<MerkleError> for Error {
-    fn from(err: MerkleError) -> Self {
-        // TODO: do a better job
-        Error::InternalError(Box::new(err))
-    }
+    #[error("the latest revision is empty and has no root hash")]
+    LatestIsEmpty,
+
+    #[error("commit the parents of this proposal first")]
+    NotLatest,
+
+    #[error("sibling already committed")]
+    SiblingCommitted,
+
+    #[error("merkle error: {0}")]
+    Merkle(#[from] MerkleError),
 }
 
 impl From<RevisionManagerError> for Error {
     fn from(err: RevisionManagerError) -> Self {
-        // TODO: do a better job
-        Error::InternalError(Box::new(err))
+        match err {
+            RevisionManagerError::IO(io_err) => Error::IO(io_err),
+            RevisionManagerError::NotLatest => Error::NotLatest,
+            RevisionManagerError::SiblingCommitted => Error::SiblingCommitted,
+        }
     }
 }
@@ -144,7 +152,7 @@ pub trait Db {
     /// [BatchOp::Delete] operations to apply
     ///
     async fn propose<'p, K: KeyType, V: ValueType>(
-        &'p mut self,
+        &'p self,
         data: Batch<K, V>,
     ) -> Result<Arc<Self::Proposal<'p>>, Error>
     where
@@ -173,8 +181,7 @@ pub trait DbView {
     async fn val<K: KeyType>(&self, key: K) -> Result<Option<Box<[u8]>>, Error>;
 
     /// Obtain a proof for a single key
-    async fn single_key_proof<K: KeyType>(&self, key: K)
-        -> Result<Option<Proof<ProofNode>>, Error>;
+    async fn single_key_proof<K: KeyType>(&self, key: K) -> Result<Proof<ProofNode>, Error>;
 
     /// Obtain a range proof over a set of keys
     ///
diff --git a/firewood/src/v2/emptydb.rs b/firewood/src/v2/emptydb.rs
index 2dc95203b..f3ff0c77e 100644
--- a/firewood/src/v2/emptydb.rs
+++ b/firewood/src/v2/emptydb.rs
@@ -40,7 +40,7 @@ impl Db for EmptyDb {
     }
 
     async fn propose<'p, K, V>(
-        &'p mut self,
+        &'p self,
         data: Batch<K, V>,
     ) -> Result<Arc<Self::Proposal<'p>>, Error>
     where
@@ -66,11 +66,8 @@ impl DbView for HistoricalImpl {
         Ok(None)
     }
 
-    async fn single_key_proof<K: KeyType>(
-        &self,
-        _key: K,
-    ) -> Result<Option<Proof<ProofNode>>, Error> {
-        Ok(None)
+    async fn single_key_proof<K: KeyType>(&self, _key: K) -> Result<Proof<ProofNode>, Error> {
+        Err(Error::RangeProofOnEmptyTrie)
     }
 
     async fn range_proof<K: KeyType, V>(
@@ -108,7 +105,7 @@ mod tests {
 
     #[tokio::test]
     async fn basic_proposal() -> Result<(), Error> {
-        let mut db = EmptyDb;
+        let db = EmptyDb;
 
         let batch = vec![
             BatchOp::Put {
@@ -132,7 +129,7 @@ mod tests {
 
     #[tokio::test]
    async fn nested_proposal() -> Result<(), Error> {
-        let mut db = EmptyDb;
+        let db = EmptyDb;
         // create proposal1 which adds key "k" with value "v" and deletes "z"
         let batch = vec![
             BatchOp::Put {
@@ -164,10 +161,12 @@ mod tests {
         );
 
         // create a proposal3 by adding the two proposals together, keeping the originals
-        // TODO: consider making this possible again
-        // let proposal3 = proposal1.as_ref() + proposal2.as_ref();
-        // assert_eq!(proposal3.val(b"k").await.unwrap().unwrap(), b"v");
-        // assert_eq!(proposal3.val(b"z").await.unwrap().unwrap(), b"undo");
+        let proposal3 = proposal1.as_ref() + proposal2.as_ref();
+        assert_eq!(proposal3.val(b"k").await.unwrap().unwrap().to_vec(), b"v");
+        assert_eq!(
+            proposal3.val(b"z").await.unwrap().unwrap().to_vec(),
+            b"undo"
+        );
 
         // now consume proposal1 and proposal2
         proposal2.commit().await?;
diff --git a/firewood/src/v2/propose.rs b/firewood/src/v2/propose.rs
index e86bfdaf6..4fa6ee490 100644
--- a/firewood/src/v2/propose.rs
+++ b/firewood/src/v2/propose.rs
@@ -126,10 +126,7 @@ impl<T: api::DbView + Send + Sync> api::DbView for Proposal<T> {
     }
 
-    async fn single_key_proof<K: api::KeyType>(
-        &self,
-        _key: K,
-    ) -> Result<Option<Proof<ProofNode>>, api::Error> {
+    async fn single_key_proof<K: api::KeyType>(&self, _key: K) -> Result<Proof<ProofNode>, api::Error> {
         todo!();
     }
 
diff --git a/grpc-testtool/benches/insert.rs b/grpc-testtool/benches/insert.rs
index 7168c6fee..46ff809e8 100644
--- a/grpc-testtool/benches/insert.rs
+++ b/grpc-testtool/benches/insert.rs
@@ -20,7 +20,7 @@ const TESTDIR: &str = "/tmp/benchdb";
 /// The port to use for testing
 const TESTPORT: u16 = 5000;
 /// The URI to connect to; this better match the TESTPORT
-const TESTURI: &str = "http://localhost:5000";
+const TESTURI: &str = "http://127.0.0.1:5000";
 /// Retry timeouts (in seconds); we want this long for processes
 /// to start and exit
 const RETRY_TIMEOUT_SEC: u32 = 5;
diff --git a/grpc-testtool/src/service.rs b/grpc-testtool/src/service.rs
index 06809f9ad..163cb006c 100644
--- a/grpc-testtool/src/service.rs
+++ b/grpc-testtool/src/service.rs
@@ -2,6 +2,7 @@
 // See the file LICENSE.md for licensing terms.
 
 use firewood::db::{Db, DbConfig};
+use firewood::v2::api::Db as _;
 use firewood::v2::api::Error;
 
 use std::path::Path;
@@ -20,11 +21,11 @@ pub mod database;
 pub mod db;
 pub mod process;
 
-trait _IntoStatusResultExt<T> {
+trait IntoStatusResultExt<T> {
     fn into_status_result(self) -> Result<T, Status>;
 }
 
-impl<T> _IntoStatusResultExt<T> for Result<T, Error> {
+impl<T> IntoStatusResultExt<T> for Result<T, Error> {
     // We map errors from bad arguments into Status::invalid_argument; all other errors are Status::internal errors
     fn into_status_result(self) -> Result<T, Status> {
         self.map_err(|err| match err {
@@ -70,12 +71,12 @@ impl Deref for Database {
     }
 }
 
-// impl Database {
-//     async fn latest(&self) -> Result<Arc<<Db as api::Db>::Historical>, Error> {
-//         let root_hash = self.root_hash().await?;
-//         self.revision(root_hash).await
-//     }
-// }
+impl Database {
+    async fn latest(&self) -> Result<Arc<<Db as api::Db>::Historical>, Error> {
+        let root_hash = self.root_hash().await?.ok_or(Error::LatestIsEmpty)?;
+        self.revision(root_hash).await
+    }
+}
 
 // TODO: implement Iterator
 #[derive(Debug)]
diff --git a/grpc-testtool/src/service/database.rs b/grpc-testtool/src/service/database.rs
index e3857c5a8..06e9e7827 100644
--- a/grpc-testtool/src/service/database.rs
+++ b/grpc-testtool/src/service/database.rs
@@ -1,7 +1,7 @@
 // Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
 // See the file LICENSE.md for licensing terms.
 
-use super::{Database as DatabaseService, Iter};
+use super::{Database as DatabaseService, IntoStatusResultExt as _, Iter};
 use crate::rpcdb::{
     database_server::Database, CloseRequest, CloseResponse, CompactRequest, CompactResponse,
     DeleteRequest, DeleteResponse, GetRequest, GetResponse, HasRequest, HasResponse,
@@ -10,71 +10,67 @@ use crate::rpcdb::{
     NewIteratorWithStartAndPrefixRequest, NewIteratorWithStartAndPrefixResponse, PutRequest,
     PutResponse, WriteBatchRequest, WriteBatchResponse,
 };
-use firewood::v2::api::BatchOp;
+use firewood::v2::api::{BatchOp, Db as _, DbView as _, Proposal as _};
 
 use tonic::{async_trait, Request, Response, Status};
 
 #[async_trait]
 impl Database for DatabaseService {
-    async fn has(&self, _request: Request<HasRequest>) -> Result<Response<HasResponse>, Status> {
-        todo!()
-        // let key = request.into_inner().key;
-        // let revision = self.latest().await.into_status_result()?;
+    async fn has(&self, request: Request<HasRequest>) -> Result<Response<HasResponse>, Status> {
+        let key = request.into_inner().key;
+        let revision = self.latest().await.into_status_result()?;
 
-        // let val = revision.val(key).await.into_status_result()?;
+        let val = revision.val(key).await.into_status_result()?;
 
-        // let response = HasResponse {
-        //     has: val.is_some(),
-        //     ..Default::default()
-        // };
+        let response = HasResponse {
+            has: val.is_some(),
+            ..Default::default()
+        };
 
-        // Ok(Response::new(response))
+        Ok(Response::new(response))
     }
 
-    async fn get(&self, _request: Request<GetRequest>) -> Result<Response<GetResponse>, Status> {
-        todo!()
-        // let key = request.into_inner().key;
-        // let revision = self.latest().await.into_status_result()?;
+    async fn get(&self, request: Request<GetRequest>) -> Result<Response<GetResponse>, Status> {
+        let key = request.into_inner().key;
+        let revision = self.latest().await.into_status_result()?;
 
-        // let value = revision
-        //     .val(key)
-        //     .await
-        //     .into_status_result()?
-        //     .map(|v| v.to_vec());
+        let value = revision
+            .val(key)
+            .await
+            .into_status_result()?
+            .map(|v| v.to_vec());
 
-        // let Some(value) = value else {
-        //     return Err(Status::not_found("key not found"));
-        // };
+        let Some(value) = value else {
+            return Err(Status::not_found("key not found"));
+        };
 
-        // let response = GetResponse {
-        //     value,
-        //     ..Default::default()
-        // };
+        let response = GetResponse {
+            value,
+            ..Default::default()
+        };
 
-        // Ok(Response::new(response))
+        Ok(Response::new(response))
     }
 
-    async fn put(&self, _request: Request<PutRequest>) -> Result<Response<PutResponse>, Status> {
-        todo!()
-        // let PutRequest { key, value } = request.into_inner();
-        // let batch = BatchOp::Put { key, value };
-        // let proposal = self.db.propose(vec![batch]).await.into_status_result()?;
-        // let _ = proposal.commit().await.into_status_result()?;
+    async fn put(&self, request: Request<PutRequest>) -> Result<Response<PutResponse>, Status> {
+        let PutRequest { key, value } = request.into_inner();
+        let batch = BatchOp::Put { key, value };
+        let proposal = self.db.propose(vec![batch]).await.into_status_result()?;
+        let _ = proposal.commit().await.into_status_result()?;
 
-        // Ok(Response::new(PutResponse::default()))
+        Ok(Response::new(PutResponse::default()))
     }
 
     async fn delete(
         &self,
-        _request: Request<DeleteRequest>,
+        request: Request<DeleteRequest>,
     ) -> Result<Response<DeleteResponse>, Status> {
-        todo!()
-        // let DeleteRequest { key } = request.into_inner();
-        // let batch = BatchOp::<_, Vec<u8>>::Delete { key };
-        // let proposal = self.db.propose(vec![batch]).await.into_status_result()?;
-        // let _ = proposal.commit().await.into_status_result()?;
+        let DeleteRequest { key } = request.into_inner();
+        let batch = BatchOp::<_, Vec<u8>>::Delete { key };
+        let proposal = self.db.propose(vec![batch]).await.into_status_result()?;
+        let _ = proposal.commit().await.into_status_result()?;
 
-        // Ok(Response::new(DeleteResponse::default()))
+        Ok(Response::new(DeleteResponse::default()))
     }
 
     async fn compact(
@@ -101,19 +97,18 @@ impl Database for DatabaseService {
 
     async fn write_batch(
         &self,
-        _request: Request<WriteBatchRequest>,
+        request: Request<WriteBatchRequest>,
     ) -> Result<Response<WriteBatchResponse>, Status> {
-        todo!()
-        // let WriteBatchRequest { puts, deletes } = request.into_inner();
-        // let batch = puts
-        //     .into_iter()
-        //     .map(from_put_request)
-        //     .chain(deletes.into_iter().map(from_delete_request))
-        //     .collect();
-        // let proposal = self.db.propose(batch).await.into_status_result()?;
-        // let _ = proposal.commit().await.into_status_result()?;
-
-        // Ok(Response::new(WriteBatchResponse::default()))
+        let WriteBatchRequest { puts, deletes } = request.into_inner();
+        let batch = puts
+            .into_iter()
+            .map(from_put_request)
+            .chain(deletes.into_iter().map(from_delete_request))
+            .collect();
+        let proposal = self.db.propose(batch).await.into_status_result()?;
+        let _ = proposal.commit().await.into_status_result()?;
+
+        Ok(Response::new(WriteBatchResponse::default()))
     }
 
     async fn new_iterator_with_start_and_prefix(
@@ -163,13 +158,13 @@ impl Database for DatabaseService {
     }
 }
 
-fn _from_put_request(request: PutRequest) -> BatchOp<Vec<u8>, Vec<u8>> {
+fn from_put_request(request: PutRequest) -> BatchOp<Vec<u8>, Vec<u8>> {
     BatchOp::Put {
         key: request.key,
         value: request.value,
     }
 }
 
-fn _from_delete_request(request: DeleteRequest) -> BatchOp<Vec<u8>, Vec<u8>> {
+fn from_delete_request(request: DeleteRequest) -> BatchOp<Vec<u8>, Vec<u8>> {
     BatchOp::Delete { key: request.key }
 }
diff --git a/storage/src/nodestore.rs b/storage/src/nodestore.rs
index 6eee06c20..991543008 100644
--- a/storage/src/nodestore.rs
+++ b/storage/src/nodestore.rs
@@ -50,6 +50,7 @@ use std::io::{Error, ErrorKind, Write};
 use std::iter::once;
 use std::mem::offset_of;
 use std::num::NonZeroU64;
+use std::ops::Deref;
 use std::sync::Arc;
 
 use crate::hashednode::hash_node;
@@ -244,7 +245,7 @@ pub trait Parentable {
 
 impl Parentable for Arc<ImmutableProposal> {
     fn as_nodestore_parent(&self) -> NodeStoreParent {
-        NodeStoreParent::Proposed(self.clone())
+        NodeStoreParent::Proposed(Arc::clone(self))
     }
     fn root_hash(&self) -> Option<TrieHash> {
         self.root_hash.clone()
@@ -587,29 +588,46 @@ pub trait HashedNodeReader: TrieReader {
     }
 }
 
+impl<T> HashedNodeReader for T
+where
+    T: Deref,
+    T::Target: HashedNodeReader,
+{
+    fn root_address_and_hash(&self) -> Result<Option<(LinearAddress, TrieHash)>, Error> {
+        self.deref().root_address_and_hash()
+    }
+}
+
 /// Reads nodes and the root address from a merkle trie.
 pub trait TrieReader: NodeReader + RootReader {}
+impl<T> TrieReader for T where T: NodeReader + RootReader {}
+
+/// Reads nodes from a merkle trie.
+pub trait NodeReader {
+    /// Returns the node at `addr`.
+    fn read_node(&self, addr: LinearAddress) -> Result<Arc<Node>, Error>;
+}
-
-impl<T: ReadInMemoryNode> TrieReader for &NodeStore<T, FileBacked> {}
-impl<T: ReadInMemoryNode> NodeReader for &NodeStore<T, FileBacked> {
+impl<T> NodeReader for T
+where
+    T: Deref,
+    T::Target: NodeReader,
+{
     fn read_node(&self, addr: LinearAddress) -> Result<Arc<Node>, Error> {
-        self.read_node_from_disk(addr)
+        self.deref().read_node(addr)
     }
 }
-impl<T: ReadInMemoryNode> RootReader for &NodeStore<T, FileBacked> {
+
+impl<T> RootReader for T
+where
+    T: Deref,
+    T::Target: RootReader,
+{
     fn root_node(&self) -> Option<Arc<Node>> {
-        self.header
-            .root_address
-            .map(|addr| self.read_node_from_disk(addr).unwrap())
+        self.deref().root_node()
     }
 }
 
-/// Reads nodes from a merkle trie.
-pub trait NodeReader {
-    /// Returns the node at `addr`.
-    fn read_node(&self, addr: LinearAddress) -> Result<Arc<Node>, Error>;
-}
-
 /// Reads the root of a merkle trie.
 pub trait RootReader {
     /// Returns the root of the trie.
@@ -676,7 +694,7 @@ impl ImmutableProposal {
     }
 }
 
-impl ReadInMemoryNode for Arc<ImmutableProposal> {
+impl ReadInMemoryNode for ImmutableProposal {
     fn read_in_memory_node(&self, addr: LinearAddress) -> Option<Arc<Node>> {
         // Check if the node being requested was created in this proposal.
         if let Some((_, node)) = self.new.get(&addr) {
@@ -703,6 +721,16 @@ pub trait ReadInMemoryNode {
     fn read_in_memory_node(&self, addr: LinearAddress) -> Option<Arc<Node>>;
 }
 
+impl<T> ReadInMemoryNode for T
+where
+    T: Deref,
+    T::Target: ReadInMemoryNode,
+{
+    fn read_in_memory_node(&self, addr: LinearAddress) -> Option<Arc<Node>> {
+        self.deref().read_in_memory_node(addr)
+    }
+}
+
 /// Contains the state of a revision of a merkle trie.
 /// The first generic parameter is the type of the revision, which supports reading nodes from parent proposals.
 /// The second generic parameter is the type of the storage used, either
@@ -1025,11 +1053,6 @@ impl<T: ReadInMemoryNode, S: ReadableStorage> RootReader for NodeStore<T, S> {
     }
 }
 
-impl<T: ReadInMemoryNode, S: ReadableStorage> TrieReader for NodeStore<T, S> where
-    NodeStore<T, S>: RootReader
-{
-}
-
 impl<T: ReadInMemoryNode, S: ReadableStorage> HashedNodeReader for NodeStore<T, S>
 where
     NodeStore<T, S>: TrieReader,