Skip to content

Commit

Permalink
Merge pull request #2142 from AleoHQ/fix/map
Browse files Browse the repository at this point in the history
Fix get_map_and_key indexing
  • Loading branch information
howardwu authored Nov 1, 2023
2 parents 2a3ea36 + 588baea commit b856693
Show file tree
Hide file tree
Showing 13 changed files with 206 additions and 45 deletions.
12 changes: 12 additions & 0 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -480,6 +480,16 @@ jobs:
workspace_member: ledger
cache_key: snarkvm-ledger-cache

ledger-with-rocksdb:
docker:
- image: cimg/rust:1.71.1
resource_class: 2xlarge
steps:
- run_serial:
flags: --features=rocks
workspace_member: ledger
cache_key: snarkvm-ledger-with-rocksdb-cache

ledger-authority:
docker:
- image: cimg/rust:1.71.1
Expand Down Expand Up @@ -854,6 +864,8 @@ workflows:
- curves
- fields
- ledger
# TODO (howardwu) - Implement `open_testing` on all storage, update to `CurrentConsensusStore::open_testing`, then re-enable.
# - ledger-with-rocksdb
- ledger-authority
- ledger-block
- ledger-coinbase
Expand Down
18 changes: 15 additions & 3 deletions ledger/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -392,11 +392,23 @@ pub(crate) mod test_helpers {
prelude::*,
};
use ledger_block::Block;
use ledger_store::{helpers::memory::ConsensusMemory, ConsensusStore};
use ledger_store::ConsensusStore;
use synthesizer::vm::VM;

pub(crate) type CurrentNetwork = Testnet3;
pub(crate) type CurrentLedger = Ledger<CurrentNetwork, ConsensusMemory<CurrentNetwork>>;

#[cfg(not(feature = "rocks"))]
pub(crate) type CurrentLedger =
Ledger<CurrentNetwork, ledger_store::helpers::memory::ConsensusMemory<CurrentNetwork>>;
#[cfg(feature = "rocks")]
pub(crate) type CurrentLedger = Ledger<CurrentNetwork, ledger_store::helpers::rocksdb::ConsensusDB<CurrentNetwork>>;

#[cfg(not(feature = "rocks"))]
pub(crate) type CurrentConsensusStore =
ConsensusStore<CurrentNetwork, ledger_store::helpers::memory::ConsensusMemory<CurrentNetwork>>;
#[cfg(feature = "rocks")]
pub(crate) type CurrentConsensusStore =
ConsensusStore<CurrentNetwork, ledger_store::helpers::rocksdb::ConsensusDB<CurrentNetwork>>;

#[allow(dead_code)]
pub(crate) struct TestEnv {
Expand Down Expand Up @@ -426,7 +438,7 @@ pub(crate) mod test_helpers {
rng: &mut (impl Rng + CryptoRng),
) -> CurrentLedger {
// Initialize the store.
let store = ConsensusStore::<_, ConsensusMemory<_>>::open(None).unwrap();
let store = CurrentConsensusStore::open(None).unwrap();
// Create a genesis block.
let genesis = VM::from(store).unwrap().genesis_beacon(&private_key, rng).unwrap();
// Initialize the ledger with the genesis block.
Expand Down
52 changes: 51 additions & 1 deletion ledger/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ fn test_state_path() {
}

#[test]
fn test_insufficient_fees() {
fn test_insufficient_private_fees() {
let rng = &mut TestRng::default();

// Initialize the test environment.
Expand Down Expand Up @@ -190,6 +190,56 @@ finalize foo:
}
}

#[test]
fn test_insufficient_public_fees() {
    let rng = &mut TestRng::default();

    // Initialize the test environment.
    let crate::test_helpers::TestEnv { ledger, private_key, .. } = crate::test_helpers::sample_test_env(rng);

    // Sample recipient.
    let recipient_private_key = PrivateKey::new(rng).unwrap();
    let recipient_address = Address::try_from(&recipient_private_key).unwrap();

    // Fund the recipient with 1 million credits.
    {
        let inputs =
            [Value::from_str(&format!("{recipient_address}")).unwrap(), Value::from_str("1000000000000u64").unwrap()];
        let transaction = ledger
            .vm
            .execute(&private_key, ("credits.aleo", "transfer_public"), inputs.into_iter(), None, 0, None, rng)
            .unwrap();

        let block =
            ledger.prepare_advance_to_next_beacon_block(&private_key, vec![], vec![], vec![transaction], rng).unwrap();

        // Check that the next block is valid.
        ledger.check_next_block(&block).unwrap();
        // Add the transfer block to the ledger.
        ledger.advance_to_next_block(&block).unwrap();
    }

    // Attempt to bond the node with insufficient public fees.
    {
        let inputs =
            [Value::from_str(&format!("{recipient_address}")).unwrap(), Value::from_str("1000000000000u64").unwrap()];
        let transaction = ledger
            .vm
            .execute(&recipient_private_key, ("credits.aleo", "bond_public"), inputs.into_iter(), None, 0, None, rng)
            .unwrap();

        let block =
            ledger.prepare_advance_to_next_beacon_block(&private_key, vec![], vec![], vec![transaction], rng).unwrap();

        // Check that the next block is valid.
        ledger.check_next_block(&block).unwrap();
        // Add the bond block to the ledger.
        ledger.advance_to_next_block(&block).unwrap();
    }
}

#[test]
fn test_insufficient_finalize_fees() {
let rng = &mut TestRng::default();
Expand Down
3 changes: 3 additions & 0 deletions ledger/store/src/helpers/rocksdb/internal/id.rs
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,7 @@ pub enum TestMap {
Test2 = DataID::Test2 as u16,
Test3 = DataID::Test3 as u16,
Test4 = DataID::Test4 as u16,
Test5 = DataID::Test5 as u16,
}

/// The RocksDB map prefix.
Expand Down Expand Up @@ -282,4 +283,6 @@ enum DataID {
Test3,
#[cfg(test)]
Test4,
#[cfg(test)]
Test5,
}
115 changes: 94 additions & 21 deletions ledger/store/src/helpers/rocksdb/internal/nested_map.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

use super::*;
use crate::helpers::{NestedMap, NestedMapRead};
use console::prelude::FromBytes;
use console::prelude::{anyhow, FromBytes};

use core::{fmt, fmt::Debug, hash::Hash, mem};
use std::{borrow::Cow, sync::atomic::Ordering};
Expand Down Expand Up @@ -80,10 +80,20 @@ impl<M: Serialize + DeserializeOwned, K: Serialize + DeserializeOwned, V: Serial
}
#[inline]
fn get_map_and_key(map_key: &[u8]) -> Result<(&[u8], &[u8])> {
    // Retrieve the map length (a little-endian u32 right after the prefix),
    // using checked slicing so a short raw key yields an error instead of a panic.
    let map_len = u32::from_bytes_le(
        map_key.get(PREFIX_LEN..PREFIX_LEN + 4).ok_or_else(|| anyhow!("NestedMap map_len index out of range"))?,
    )? as usize;

    // Retrieve the map bytes.
    let map = map_key
        .get(PREFIX_LEN + 4..PREFIX_LEN + 4 + map_len)
        .ok_or_else(|| anyhow!("NestedMap map index out of range"))?;

    // Retrieve the key bytes (everything after the map bytes).
    let key = map_key.get(PREFIX_LEN + 4 + map_len..).ok_or_else(|| anyhow!("NestedMap key index out of range"))?;

    // Return the map and key bytes.
    Ok((map, key))
}

Expand Down Expand Up @@ -140,7 +150,9 @@ impl<
let (map_key, _) = entry?;

// Extract the bytes belonging to the map and the key.
let (entry_map, _) = get_map_and_key(&map_key)?;
let Ok((entry_map, _)) = get_map_and_key(&map_key) else {
break;
};

// If the 'entry_map' matches 'serialized_map', delete the key.
if entry_map == serialized_map {
Expand Down Expand Up @@ -282,7 +294,9 @@ impl<
let (map_key, _) = entry?;

// Extract the bytes belonging to the map and the key.
let (entry_map, _) = get_map_and_key(&map_key)?;
let Ok((entry_map, _)) = get_map_and_key(&map_key) else {
break;
};

// If the 'entry_map' matches 'serialized_map', delete the key.
if entry_map == serialized_map {
Expand Down Expand Up @@ -397,7 +411,9 @@ impl<
let (map_key, value) = entry?;

// Extract the bytes belonging to the map and the key.
let (entry_map, entry_key) = get_map_and_key(&map_key)?;
let Ok((entry_map, entry_key)) = get_map_and_key(&map_key) else {
break;
};

// If the 'entry_map' matches 'serialized_map', deserialize the key and value.
if entry_map == serialized_map {
Expand Down Expand Up @@ -708,7 +724,10 @@ mod tests {
use crate::{
atomic_batch_scope,
atomic_finalize,
helpers::rocksdb::{internal::tests::temp_dir, MapID, TestMap},
helpers::{
rocksdb::{internal::tests::temp_dir, MapID, TestMap},
traits::Map,
},
FinalizeMode,
};
use console::{
Expand Down Expand Up @@ -748,6 +767,28 @@ mod tests {
}
}

/// Opens a testing-mode, non-nested `DataMap` on the given database,
/// scoped by `map_id` within the database's network.
fn open_non_nested_map_testing_from_db<
    K: Serialize + DeserializeOwned,
    V: Serialize + DeserializeOwned,
    T: Into<u16>,
>(
    database: RocksDB,
    map_id: T,
) -> DataMap<K, V> {
    // The context is the little-endian network ID followed by the little-endian map ID.
    let context = [database.network_id.to_le_bytes().as_slice(), map_id.into().to_le_bytes().as_slice()].concat();

    // Assemble the inner map with fresh (default) atomic-batch state, then wrap it.
    let inner = InnerDataMap {
        database,
        context,
        atomic_batch: Default::default(),
        batch_in_progress: Default::default(),
        checkpoints: Default::default(),
    };
    DataMap(Arc::new(inner))
}

struct TestStorage {
own_map: NestedDataMap<usize, usize, String>,
extra_maps: TestStorage2,
Expand Down Expand Up @@ -798,7 +839,7 @@ mod tests {
self.own_map.is_atomic_in_progress()
&& self.extra_maps.own_map1.is_atomic_in_progress()
&& self.extra_maps.own_map1.is_atomic_in_progress()
&& self.extra_maps.extra_maps.own_map.is_atomic_in_progress()
&& self.extra_maps.extra_maps.own_nested_map.is_atomic_in_progress()
}
}

Expand Down Expand Up @@ -855,35 +896,44 @@ mod tests {
}

struct TestStorage3 {
own_map: NestedDataMap<usize, usize, String>,
own_nested_map: NestedDataMap<usize, usize, String>,
own_map: DataMap<usize, String>,
}

impl TestStorage3 {
    /// Opens both maps on the same database; the plain map (Test5) uses the
    /// prefix immediately after the nested map (Test4).
    fn open(database: RocksDB) -> Self {
        Self {
            own_nested_map: open_map_testing_from_db(database.clone(), MapID::Test(TestMap::Test4)),
            own_map: open_non_nested_map_testing_from_db(database, MapID::Test(TestMap::Test5)),
        }
    }

    /// Starts an atomic batch on both maps.
    fn start_atomic(&self) {
        self.own_nested_map.start_atomic();
        self.own_map.start_atomic();
    }

    /// Returns `true` if either map has an atomic batch in progress.
    fn is_atomic_in_progress(&self) -> bool {
        self.own_nested_map.is_atomic_in_progress() || self.own_map.is_atomic_in_progress()
    }

    /// Records a checkpoint on both maps.
    fn atomic_checkpoint(&self) {
        self.own_nested_map.atomic_checkpoint();
        self.own_map.atomic_checkpoint();
    }

    /// Clears the latest checkpoint on both maps.
    fn clear_latest_checkpoint(&self) {
        self.own_nested_map.clear_latest_checkpoint();
        self.own_map.clear_latest_checkpoint();
    }

    /// Rewinds both maps to their latest checkpoint.
    fn atomic_rewind(&self) {
        self.own_nested_map.atomic_rewind();
        self.own_map.atomic_rewind();
    }

    /// Finishes the atomic batch on both maps, short-circuiting on the first error.
    fn finish_atomic(&self) -> Result<()> {
        self.own_nested_map.finish_atomic()?;
        self.own_map.finish_atomic()
    }
}
Expand Down Expand Up @@ -966,6 +1016,29 @@ mod tests {
crate::helpers::test_helpers::nested_map::check_iterators_match(map);
}

#[test]
#[serial]
#[traced_test]
fn test_iter_from_nested_to_non_nested() {
    // Open a storage where a plain DataMap sits immediately after a NestedDataMap.
    let db = RocksDB::open_testing(temp_dir(), None).expect("Failed to open a test database");
    let storage = TestStorage3::open(db);

    // Insert 5 (confirmed) records into nested map 77.
    (0..5).for_each(|i| {
        storage.own_nested_map.insert(77, i, i.to_string()).expect("Failed to insert");
    });

    // Insert 5 (confirmed) records into the neighboring plain map; the keys are large on purpose.
    for offset in 0..5 {
        let key = usize::MAX - offset;
        storage.own_map.insert(key, key.to_string()).expect("Failed to insert");
    }

    // Reading nested map 77 must yield exactly its own 5 records,
    // without spilling into the neighboring plain map.
    let records = storage.own_nested_map.get_map_confirmed(&77).unwrap();
    assert_eq!(records.len(), 5);
}

#[test]
#[serial]
#[traced_test]
Expand Down Expand Up @@ -1574,7 +1647,7 @@ mod tests {
assert!(test_storage.own_map.iter_confirmed().next().is_none());
assert!(test_storage.extra_maps.own_map1.iter_confirmed().next().is_none());
assert!(test_storage.extra_maps.own_map2.iter_confirmed().next().is_none());
assert!(test_storage.extra_maps.extra_maps.own_map.iter_confirmed().next().is_none());
assert!(test_storage.extra_maps.extra_maps.own_nested_map.iter_confirmed().next().is_none());

assert_eq!(test_storage.own_map.checkpoints.lock().last(), None);

Expand Down Expand Up @@ -1603,11 +1676,11 @@ mod tests {
test_storage.extra_maps.own_map2.insert(2, 2, 2.to_string()).unwrap();

// Start another atomic write batch.
atomic_batch_scope!(test_storage.extra_maps.extra_maps.own_map, {
assert!(test_storage.extra_maps.extra_maps.own_map.is_atomic_in_progress());
atomic_batch_scope!(test_storage.extra_maps.extra_maps.own_nested_map, {
assert!(test_storage.extra_maps.extra_maps.own_nested_map.is_atomic_in_progress());

// Write an item into the fourth map.
test_storage.extra_maps.extra_maps.own_map.insert(3, 3, 3.to_string()).unwrap();
test_storage.extra_maps.extra_maps.own_nested_map.insert(3, 3, 3.to_string()).unwrap();

Ok(())
})?;
Expand All @@ -1628,7 +1701,7 @@ mod tests {
assert_eq!(test_storage.own_map.iter_confirmed().count(), 1);
assert_eq!(test_storage.extra_maps.own_map1.iter_confirmed().count(), 1);
assert_eq!(test_storage.extra_maps.own_map2.iter_confirmed().count(), 1);
assert_eq!(test_storage.extra_maps.extra_maps.own_map.iter_confirmed().count(), 1);
assert_eq!(test_storage.extra_maps.extra_maps.own_nested_map.iter_confirmed().count(), 1);

// The atomic_write_batch macro uses ?, so the test returns a Result for simplicity.
Ok(())
Expand Down Expand Up @@ -1659,11 +1732,11 @@ mod tests {
test_storage.extra_maps.own_map2.insert(2, 2, 2.to_string()).unwrap();

// Start another atomic write batch.
let result: Result<()> = atomic_batch_scope!(test_storage.extra_maps.extra_maps.own_map, {
let result: Result<()> = atomic_batch_scope!(test_storage.extra_maps.extra_maps.own_nested_map, {
assert!(test_storage.is_atomic_in_progress_everywhere());

// Write an item into the fourth map.
test_storage.extra_maps.extra_maps.own_map.insert(3, 3, 3.to_string()).unwrap();
test_storage.extra_maps.extra_maps.own_nested_map.insert(3, 3, 3.to_string()).unwrap();

// Rewind the atomic batch via a simulated error.
bail!("An error that will trigger a single rewind.");
Expand All @@ -1690,7 +1763,7 @@ mod tests {
assert!(test_storage.own_map.iter_confirmed().next().is_none());
assert!(test_storage.extra_maps.own_map1.iter_confirmed().next().is_none());
assert!(test_storage.extra_maps.own_map2.iter_confirmed().next().is_none());
assert!(test_storage.extra_maps.extra_maps.own_map.iter_confirmed().next().is_none());
assert!(test_storage.extra_maps.extra_maps.own_nested_map.iter_confirmed().next().is_none());

// Note: all the checks going through .database can be performed on any one
// of the objects, as all of them share the same instance of the database.
Expand All @@ -1706,6 +1779,6 @@ mod tests {
assert_eq!(test_storage.own_map.iter_confirmed().count(), 1);
assert_eq!(test_storage.extra_maps.own_map1.iter_confirmed().count(), 1);
assert_eq!(test_storage.extra_maps.own_map2.iter_confirmed().count(), 1);
assert_eq!(test_storage.extra_maps.extra_maps.own_map.iter_confirmed().count(), 0);
assert_eq!(test_storage.extra_maps.extra_maps.own_nested_map.iter_confirmed().count(), 0);
}
}
Loading

0 comments on commit b856693

Please sign in to comment.