diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs
index fc5f4d9543a..682c3e86932 100644
--- a/chain/chain/src/chain.rs
+++ b/chain/chain/src/chain.rs
@@ -1686,7 +1686,7 @@ impl Chain {
         parent_hash: CryptoHash,
     ) -> Result<bool, Error> {
         let epoch_id = self.epoch_manager.get_epoch_id_from_prev_block(&parent_hash)?;
-        for shard_id in 0..self.epoch_manager.num_shards(&epoch_id)? {
+        for shard_id in self.epoch_manager.shard_ids(&epoch_id)? {
             if self.shard_tracker.care_about_shard(me.as_ref(), &parent_hash, shard_id, true)
                 || self.shard_tracker.will_care_about_shard(
                     me.as_ref(),
@@ -2323,7 +2323,7 @@ impl Chain {
         // the last final block on chain, which is OK, because in the flat storage implementation
         // we don't assume that.
         let epoch_id = block.header().epoch_id();
-        for shard_id in 0..self.epoch_manager.num_shards(epoch_id)? {
+        for shard_id in self.epoch_manager.shard_ids(epoch_id)? {
             let need_flat_storage_update = if is_caught_up {
                 // If we already caught up this epoch, then flat storage exists for both shards which we already track
                 // and shards which will be tracked in next epoch, so we can update them.
@@ -2709,7 +2709,8 @@ impl Chain {
         parent_hash: &CryptoHash,
     ) -> Result<Vec<ShardId>, Error> {
         let epoch_id = epoch_manager.get_epoch_id_from_prev_block(parent_hash)?;
-        Ok((0..epoch_manager.num_shards(&epoch_id)?)
+        Ok((epoch_manager.shard_ids(&epoch_id)?)
+            .into_iter()
             .filter(|shard_id| {
                 Self::should_catch_up_shard(
                     epoch_manager,
@@ -3591,7 +3592,7 @@ impl Chain {
         chain_update.commit()?;
 
         let epoch_id = block.header().epoch_id();
-        for shard_id in 0..self.epoch_manager.num_shards(epoch_id)? {
+        for shard_id in self.epoch_manager.shard_ids(epoch_id)? {
             // Update flat storage for each shard being caught up. We catch up a shard if it is tracked in the next
             // epoch. If it is tracked in this epoch as well, it was updated during regular block processing.
             if !self.shard_tracker.care_about_shard(
@@ -4751,7 +4752,7 @@ impl Chain {
         }
         let mut account_id_to_shard_id_map = HashMap::new();
         let mut shard_receipts: Vec<_> =
-            (0..shard_layout.num_shards()).map(|i| (i, Vec::new())).collect();
+            shard_layout.shard_ids().into_iter().map(|shard_id| (shard_id, Vec::new())).collect();
         for receipt in receipts.iter() {
             let shard_id = match account_id_to_shard_id_map.get(&receipt.receiver_id) {
                 Some(id) => *id,
diff --git a/chain/chain/src/flat_storage_creator.rs b/chain/chain/src/flat_storage_creator.rs
index e4c8cb2f9f1..a1497fea21c 100644
--- a/chain/chain/src/flat_storage_creator.rs
+++ b/chain/chain/src/flat_storage_creator.rs
@@ -435,13 +435,13 @@ impl FlatStorageCreator {
         num_threads: usize,
     ) -> Result<Option<Self>, Error> {
         let chain_head = chain_store.head()?;
-        let num_shards = epoch_manager.num_shards(&chain_head.epoch_id)?;
+        let shard_ids = epoch_manager.shard_ids(&chain_head.epoch_id)?;
         let mut shard_creators: HashMap<ShardUId, FlatStorageShardCreator> = HashMap::new();
         let mut creation_needed = false;
         let flat_storage_manager = runtime.get_flat_storage_manager();
         // Create flat storage for all shards.
         // TODO(nikurt): Choose which shards need to open the flat storage.
-        for shard_id in 0..num_shards {
+        for shard_id in shard_ids {
             // The node applies transactions from the shards it cares about this and the next epoch.
             let shard_uid = epoch_manager.shard_id_to_uid(shard_id, &chain_head.epoch_id)?;
             let status = flat_storage_manager.get_flat_storage_status(shard_uid);
diff --git a/chain/chain/src/test_utils.rs b/chain/chain/src/test_utils.rs
index 768b99727ec..eb632d50c93 100644
--- a/chain/chain/src/test_utils.rs
+++ b/chain/chain/src/test_utils.rs
@@ -329,7 +329,7 @@ mod test {
         shard_layout: &ShardLayout,
     ) -> Vec<CryptoHash> {
         let mut receipts_hashes = vec![];
-        for shard_id in 0..shard_layout.num_shards() {
+        for shard_id in shard_layout.shard_ids() {
             let shard_receipts: Vec<Receipt> = receipts
                 .iter()
                 .filter(|&receipt| {
diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs
index 3a930d370a7..56c2e9f1606 100644
--- a/chain/chain/src/test_utils/kv_runtime.rs
+++ b/chain/chain/src/test_utils/kv_runtime.rs
@@ -417,6 +417,10 @@ impl EpochManagerAdapter for MockEpochManager {
         Ok(self.num_shards)
     }
 
+    fn shard_ids(&self, _epoch_id: &EpochId) -> Result<Vec<ShardId>, EpochError> {
+        Ok((0..self.num_shards).collect())
+    }
+
     fn num_total_parts(&self) -> usize {
         12 + (self.num_shards as usize + 1) % 50
     }
diff --git a/chain/chunks/src/client.rs b/chain/chunks/src/client.rs
index 05936071cfb..44456561e96 100644
--- a/chain/chunks/src/client.rs
+++ b/chain/chunks/src/client.rs
@@ -144,8 +144,7 @@ impl ShardedTransactionPool {
 
 #[cfg(test)]
 mod tests {
-    use std::{collections::HashMap, str::FromStr};
-
+    use crate::client::ShardedTransactionPool;
     use near_crypto::{InMemorySigner, KeyType};
     use near_o11y::testonly::init_test_logger;
     use near_pool::types::PoolIterator;
@@ -157,9 +156,8 @@ mod tests {
         types::AccountId,
     };
     use near_store::ShardUId;
-    use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng};
-
-    use crate::client::ShardedTransactionPool;
+    use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng};
+    use std::{collections::HashMap, str::FromStr};
 
     const TEST_SEED: RngSeed = [3; 32];
 
@@ -206,9 +204,9 @@ mod tests {
         let n = 100;
         tracing::info!("inserting {n} transactions into the pool using the old shard layout");
         for i in 0..n {
-            let num_shards = old_shard_layout.num_shards();
-            let signer_shard_id = rng.gen_range(0..num_shards);
-            let receiver_shard_id = rng.gen_range(0..num_shards);
+            let shard_ids: Vec<_> = old_shard_layout.shard_ids().collect();
+            let &signer_shard_id = shard_ids.choose(&mut rng).unwrap();
+            let &receiver_shard_id = shard_ids.choose(&mut rng).unwrap();
             let nonce = i as u64;
 
             let signer_id = *shard_id_to_accounts[&signer_shard_id].choose(&mut rng).unwrap();
@@ -242,8 +240,8 @@ mod tests {
 
         tracing::info!("checking the pool after resharding");
         {
-            let num_shards = new_shard_layout.num_shards();
-            for shard_id in 0..num_shards {
+            let shard_ids: Vec<_> = new_shard_layout.shard_ids().collect();
+            for &shard_id in shard_ids.iter() {
                 let shard_id = shard_id as u32;
                 let shard_uid = ShardUId { shard_id, version: new_shard_layout.version() };
                 let pool = pool.pool_for_shard(shard_uid);
@@ -253,7 +251,7 @@ mod tests {
             }
 
             let mut total = 0;
-            for shard_id in 0..num_shards {
+            for shard_id in shard_ids {
                 let shard_id = shard_id as u32;
                 let shard_uid = ShardUId { shard_id, version: new_shard_layout.version() };
                 let mut pool_iter = pool.get_pool_iterator(shard_uid).unwrap();
diff --git a/chain/chunks/src/lib.rs b/chain/chunks/src/lib.rs
index 32ec2395c16..cf66357522d 100644
--- a/chain/chunks/src/lib.rs
+++ b/chain/chunks/src/lib.rs
@@ -505,7 +505,10 @@ impl ShardsManager {
     fn get_tracking_shards(&self, parent_hash: &CryptoHash) -> HashSet<ShardId> {
         let epoch_id =
             self.epoch_manager.get_epoch_id_from_prev_block(parent_hash).unwrap();
-        (0..self.epoch_manager.num_shards(&epoch_id).unwrap())
+        self.epoch_manager
+            .shard_ids(&epoch_id)
+            .unwrap()
+            .into_iter()
             .filter(|chunk_shard_id| {
                 cares_about_shard_this_or_next_epoch(
                     self.me.as_ref(),
@@ -1273,7 +1276,7 @@ impl ShardsManager {
             };
         }
 
-        if header.shard_id() >= self.epoch_manager.num_shards(&epoch_id)? {
+        if !self.epoch_manager.shard_ids(&epoch_id)?.contains(&header.shard_id()) {
             return if epoch_id_confirmed {
                 byzantine_assert!(false);
                 Err(Error::InvalidChunkShardId)
@@ -1718,8 +1721,10 @@ impl ShardsManager {
         let block_producers =
             self.epoch_manager.get_epoch_block_producers_ordered(&epoch_id, lastest_block_hash)?;
         let current_chunk_height = partial_encoded_chunk.header.height_created();
-        let num_shards = self.epoch_manager.num_shards(&epoch_id)?;
-        let mut next_chunk_producers = (0..num_shards)
+        let mut next_chunk_producers = self
+            .epoch_manager
+            .shard_ids(&epoch_id)?
+            .into_iter()
             .map(|shard_id| {
                 self.epoch_manager.get_chunk_producer(&epoch_id, current_chunk_height + 1, shard_id)
             })
@@ -1769,8 +1774,7 @@ impl ShardsManager {
         chunk_entry: &EncodedChunksCacheEntry,
     ) -> Result<bool, Error> {
         let epoch_id = self.epoch_manager.get_epoch_id_from_prev_block(prev_block_hash)?;
-        for shard_id in 0..self.epoch_manager.num_shards(&epoch_id)? {
-            let shard_id = shard_id as ShardId;
+        for shard_id in self.epoch_manager.shard_ids(&epoch_id)? {
             if !chunk_entry.receipts.contains_key(&shard_id) {
                 if need_receipt(prev_block_hash, shard_id, self.me.as_ref(), &self.shard_tracker) {
                     return Ok(false);
diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs
index bfcbcf4c7f5..bc3b8a2bd93 100644
--- a/chain/client/src/client.rs
+++ b/chain/client/src/client.rs
@@ -1661,7 +1661,7 @@ impl Client {
     fn produce_chunks(&mut self, block: &Block, validator_id: AccountId) {
         let epoch_id =
             self.epoch_manager.get_epoch_id_from_prev_block(block.header().hash()).unwrap();
-        for shard_id in 0..self.epoch_manager.num_shards(&epoch_id).unwrap() {
+        for shard_id in self.epoch_manager.shard_ids(&epoch_id).unwrap() {
             let next_height = block.header().height() + 1;
             let epoch_manager = self.epoch_manager.as_ref();
             let chunk_proposer =
@@ -2544,8 +2544,7 @@ impl Client {
         let tracked_shards = if self.config.tracked_shards.is_empty() {
             vec![]
         } else {
-            let num_shards = self.epoch_manager.num_shards(&tip.epoch_id)?;
-            (0..num_shards).collect()
+            self.epoch_manager.shard_ids(&tip.epoch_id)?
         };
         let tier1_accounts = self.get_tier1_accounts(&tip)?;
         let block = self.chain.get_block(&tip.last_block_hash)?;
diff --git a/chain/client/src/client_actor.rs b/chain/client/src/client_actor.rs
index 2c87618e6a9..2731e041f51 100644
--- a/chain/client/src/client_actor.rs
+++ b/chain/client/src/client_actor.rs
@@ -60,7 +60,7 @@ use near_primitives::epoch_manager::RngSeed;
 use near_primitives::hash::CryptoHash;
 use near_primitives::network::{AnnounceAccount, PeerId};
 use near_primitives::static_clock::StaticClock;
-use near_primitives::types::{BlockHeight, ShardId};
+use near_primitives::types::BlockHeight;
 use near_primitives::unwrap_or_return;
 use near_primitives::utils::{from_timestamp, MaybeValidated};
 use near_primitives::validator_signer::ValidatorSigner;
@@ -1665,18 +1665,22 @@ impl ClientActor {
                 .unwrap()
                 .epoch_id()
                 .clone();
-            let shards_to_sync: Vec<ShardId> =
-                (0..self.client.epoch_manager.num_shards(&epoch_id).unwrap())
-                    .filter(|x| {
-                        cares_about_shard_this_or_next_epoch(
-                            me.as_ref(),
-                            &prev_hash,
-                            *x,
-                            true,
-                            &self.client.shard_tracker,
-                        )
-                    })
-                    .collect();
+            let shards_to_sync: Vec<_> = self
+                .client
+                .epoch_manager
+                .shard_ids(&epoch_id)
+                .unwrap()
+                .into_iter()
+                .filter(|&shard_id| {
+                    cares_about_shard_this_or_next_epoch(
+                        me.as_ref(),
+                        &prev_hash,
+                        shard_id,
+                        true,
+                        &self.client.shard_tracker,
+                    )
+                })
+                .collect();
 
             let use_colour =
                 matches!(self.client.config.log_summary_style, LogSummaryStyle::Colored);
diff --git a/chain/client/src/debug.rs b/chain/client/src/debug.rs
index 03e773772e0..2cb127c4b65 100644
--- a/chain/client/src/debug.rs
+++ b/chain/client/src/debug.rs
@@ -334,13 +334,15 @@ impl ClientActor {
         let epoch_id = self.client.chain.header_head()?.epoch_id;
         let fetch_hash = self.client.chain.header_head()?.last_block_hash;
         let me = self.client.validator_signer.as_ref().map(|x| x.validator_id().clone());
-        let num_shards = self.client.epoch_manager.num_shards(&epoch_id).unwrap();
-        let shards_tracked_this_epoch = (0..num_shards)
-            .map(|shard_id| {
+        let shard_ids = self.client.epoch_manager.shard_ids(&epoch_id).unwrap();
+        let shards_tracked_this_epoch = shard_ids
+            .iter()
+            .map(|&shard_id| {
                 self.client.shard_tracker.care_about_shard(me.as_ref(), &fetch_hash, shard_id, true)
             })
             .collect();
-        let shards_tracked_next_epoch = (0..num_shards)
+        let shards_tracked_next_epoch = shard_ids
+            .into_iter()
             .map(|shard_id| {
                 self.client.shard_tracker.will_care_about_shard(
                     me.as_ref(),
diff --git a/chain/client/src/info.rs b/chain/client/src/info.rs
index eada6911b0a..40d90725940 100644
--- a/chain/client/src/info.rs
+++ b/chain/client/src/info.rs
@@ -137,8 +137,8 @@ impl InfoHelper {
     /// Count which shards are tracked by the node in the epoch indicated by head parameter.
     fn record_tracked_shards(head: &Tip, client: &crate::client::Client) {
         let me = client.validator_signer.as_ref().map(|x| x.validator_id());
-        if let Ok(num_shards) = client.epoch_manager.num_shards(&head.epoch_id) {
-            for shard_id in 0..num_shards {
+        if let Ok(shard_ids) = client.epoch_manager.shard_ids(&head.epoch_id) {
+            for shard_id in shard_ids {
                 let tracked = client.shard_tracker.care_about_shard(
                     me,
                     &head.last_block_hash,
@@ -180,8 +180,8 @@ impl InfoHelper {
                     .with_label_values(&[&shard_id.to_string()])
                     .set(if is_chunk_producer_for_shard { 1 } else { 0 });
             }
-        } else if let Ok(num_shards) = client.epoch_manager.num_shards(&head.epoch_id) {
-            for shard_id in 0..num_shards {
+        } else if let Ok(shard_ids) = client.epoch_manager.shard_ids(&head.epoch_id) {
+            for shard_id in shard_ids {
                 metrics::IS_CHUNK_PRODUCER_FOR_SHARD
                     .with_label_values(&[&shard_id.to_string()])
                     .set(0);
@@ -196,7 +196,7 @@ impl InfoHelper {
     fn record_epoch_settlement_info(head: &Tip, client: &crate::client::Client) {
         let epoch_info = client.epoch_manager.get_epoch_info(&head.epoch_id);
         let blocks_in_epoch = client.config.epoch_length;
-        let number_of_shards = client.epoch_manager.num_shards(&head.epoch_id).unwrap_or_default();
+        let shard_ids = client.epoch_manager.shard_ids(&head.epoch_id).unwrap_or_default();
         if let Ok(epoch_info) = epoch_info {
             metrics::VALIDATORS_CHUNKS_EXPECTED_IN_EPOCH.reset();
             metrics::VALIDATORS_BLOCKS_EXPECTED_IN_EPOCH.reset();
@@ -236,7 +236,7 @@ impl InfoHelper {
                     .set(stake_to_blocks(stake, stake_sum))
             });
 
-        for shard_id in 0..number_of_shards {
+        for shard_id in shard_ids {
             let mut stake_per_cp = HashMap::<ValidatorId, Balance>::new();
             stake_sum = 0;
             for &id in &epoch_info.chunk_producers_settlement()[shard_id as usize] {
diff --git a/chain/client/src/view_client.rs b/chain/client/src/view_client.rs
index adfa22b7dd2..080238a83d5 100644
--- a/chain/client/src/view_client.rs
+++ b/chain/client/src/view_client.rs
@@ -264,7 +264,7 @@ impl ViewClientActor {
         let head = self.chain.head()?;
         let epoch_id = self.epoch_manager.get_epoch_id(&head.last_block_hash)?;
         let epoch_info: Arc<EpochInfo> = self.epoch_manager.get_epoch_info(&epoch_id)?;
-        let num_shards = self.epoch_manager.num_shards(&epoch_id)?;
+        let shard_ids = self.epoch_manager.shard_ids(&epoch_id)?;
         let cur_block_info = self.epoch_manager.get_block_info(&head.last_block_hash)?;
         let next_epoch_start_height =
             self.epoch_manager.get_epoch_start_height(cur_block_info.hash())?
@@ -277,8 +277,9 @@ impl ViewClientActor {
         for block_height in head.height..next_epoch_start_height {
             let bp = epoch_info.sample_block_producer(block_height);
             let bp = epoch_info.get_validator(bp).account_id().clone();
-            let cps: Vec<AccountId> = (0..num_shards)
-                .map(|shard_id| {
+            let cps: Vec<AccountId> = shard_ids
+                .iter()
+                .map(|&shard_id| {
                     let cp = epoch_info.sample_chunk_producer(block_height, shard_id);
                     let cp = epoch_info.get_validator(cp).account_id().clone();
                     cp
diff --git a/chain/epoch-manager/src/adapter.rs b/chain/epoch-manager/src/adapter.rs
index d95be2e0685..c7f0e9efd4b 100644
--- a/chain/epoch-manager/src/adapter.rs
+++ b/chain/epoch-manager/src/adapter.rs
@@ -37,6 +37,9 @@ pub trait EpochManagerAdapter: Send + Sync {
     /// Get current number of shards.
     fn num_shards(&self, epoch_id: &EpochId) -> Result<NumShards, EpochError>;
 
+    /// Get the list of shard ids.
+    fn shard_ids(&self, epoch_id: &EpochId) -> Result<Vec<ShardId>, EpochError>;
+
     /// Number of Reed-Solomon parts we split each chunk into.
     ///
     /// Note: this shouldn't be too large, our Reed-Solomon supports at most 256
@@ -412,6 +415,11 @@ impl EpochManagerAdapter for EpochManagerHandle {
         Ok(epoch_manager.get_shard_layout(epoch_id)?.num_shards())
     }
 
+    fn shard_ids(&self, epoch_id: &EpochId) -> Result<Vec<ShardId>, EpochError> {
+        let epoch_manager = self.read();
+        Ok(epoch_manager.get_shard_layout(epoch_id)?.shard_ids().collect())
+    }
+
     fn num_total_parts(&self) -> usize {
         let seats = self.read().genesis_num_block_producer_seats;
         if seats > 1 {
diff --git a/chain/epoch-manager/src/shard_tracker.rs b/chain/epoch-manager/src/shard_tracker.rs
index 2b6f22400ae..c599be9871a 100644
--- a/chain/epoch-manager/src/shard_tracker.rs
+++ b/chain/epoch-manager/src/shard_tracker.rs
@@ -71,10 +71,11 @@ impl ShardTracker {
             TrackedConfig::Accounts(tracked_accounts) => {
                 let shard_layout = self.epoch_manager.get_shard_layout(epoch_id)?;
                 let tracking_mask = self.tracking_shards_cache.get_or_put(epoch_id.clone(), |_| {
-                    let mut tracking_mask = vec![false; shard_layout.num_shards() as usize];
+                    let mut tracking_mask: Vec<_> =
+                        shard_layout.shard_ids().map(|_| false).collect();
                     for account_id in tracked_accounts {
                         let shard_id = account_id_to_shard_id(account_id, &shard_layout);
-                        *tracking_mask.get_mut(shard_id as usize).unwrap() = true;
+                        tracking_mask[shard_id as usize] = true;
                     }
                     tracking_mask
                 });
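
For reference, the `TrackedConfig::Accounts` branch above still builds a positional `Vec<bool>` mask indexed by `shard_id as usize`. A minimal standalone sketch of that construction; the `ShardId`, `ShardLayout`, and `account_id_to_shard_id` definitions below are simplified stand-ins, not the nearcore ones:

```rust
// Standalone sketch of the tracking-mask construction in shard_tracker.rs.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

type ShardId = u64;

struct ShardLayout {
    num_shards: u64,
}

impl ShardLayout {
    // Mirrors the `ShardLayout::shard_ids` helper added in this diff.
    fn shard_ids(&self) -> impl Iterator<Item = ShardId> {
        0..self.num_shards
    }
}

// Stand-in: hash the account name onto a shard, just so the sketch runs.
fn account_id_to_shard_id(account_id: &str, layout: &ShardLayout) -> ShardId {
    let mut hasher = DefaultHasher::new();
    account_id.hash(&mut hasher);
    hasher.finish() % layout.num_shards
}

fn build_tracking_mask(layout: &ShardLayout, tracked_accounts: &[&str]) -> Vec<bool> {
    // Same shape as the diff: one `false` slot per shard id, then mark the
    // shards that any tracked account maps to. The indexing still assumes
    // shard ids are 0..n for now.
    let mut tracking_mask: Vec<_> = layout.shard_ids().map(|_| false).collect();
    for account_id in tracked_accounts {
        let shard_id = account_id_to_shard_id(account_id, layout);
        tracking_mask[shard_id as usize] = true;
    }
    tracking_mask
}

fn main() {
    let layout = ShardLayout { num_shards: 4 };
    let mask = build_tracking_mask(&layout, &["alice.near", "bob.near"]);
    println!("{mask:?}");
}
```
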
diff --git a/chain/epoch-manager/src/validator_selection.rs b/chain/epoch-manager/src/validator_selection.rs
index 574b6d199b7..740295f1afb 100644
--- a/chain/epoch-manager/src/validator_selection.rs
+++ b/chain/epoch-manager/src/validator_selection.rs
@@ -5,7 +5,7 @@ use near_primitives::epoch_manager::{EpochConfig, RngSeed};
 use near_primitives::errors::EpochError;
 use near_primitives::types::validator_stake::ValidatorStake;
 use near_primitives::types::{
-    AccountId, Balance, ProtocolVersion, ValidatorId, ValidatorKickoutReason,
+    AccountId, Balance, NumShards, ProtocolVersion, ValidatorId, ValidatorKickoutReason,
 };
 #[cfg(feature = "protocol_feature_chunk_validation")]
 use near_primitives::validator_mandates::{ValidatorMandates, ValidatorMandatesConfig};
@@ -32,7 +32,7 @@ pub fn proposals_to_epoch_info(
         "Proposals should not have duplicates"
     );
 
-    let num_shards = epoch_config.shard_layout.num_shards();
+    let shard_ids: Vec<_> = epoch_config.shard_layout.shard_ids().collect();
     let min_stake_ratio = {
         let rational = epoch_config.validator_selection_config.minimum_stake_ratio;
         Ratio::new(*rational.numer() as u128, *rational.denom() as u128)
@@ -64,7 +64,7 @@ pub fn proposals_to_epoch_info(
             &mut chunk_producer_proposals,
             max_cp_selected,
             min_stake_ratio,
-            num_shards,
+            shard_ids.len() as NumShards,
             last_version,
         );
         (chunk_producer_proposals, chunk_producers, cp_stake_treshold)
@@ -114,13 +114,15 @@ pub fn proposals_to_epoch_info(
     {
         let minimum_validators_per_shard =
             epoch_config.validator_selection_config.minimum_validators_per_shard as usize;
-        let shard_assignment =
-            assign_shards(chunk_producers, num_shards, minimum_validators_per_shard).map_err(
-                |_| EpochError::NotEnoughValidators {
-                    num_validators: num_chunk_producers as u64,
-                    num_shards,
-                },
-            )?;
+        let shard_assignment = assign_shards(
+            chunk_producers,
+            shard_ids.len() as NumShards,
+            minimum_validators_per_shard,
+        )
+        .map_err(|_| EpochError::NotEnoughValidators {
+            num_validators: num_chunk_producers as u64,
+            num_shards: shard_ids.len() as NumShards,
+        })?;
 
         let mut chunk_producers_settlement: Vec<Vec<ValidatorId>> =
             shard_assignment.iter().map(|vs| Vec::with_capacity(vs.len())).collect();
@@ -152,14 +154,19 @@ pub fn proposals_to_epoch_info(
     } else {
         if chunk_producers.is_empty() {
             // All validators tried to unstake?
-            return Err(EpochError::NotEnoughValidators { num_validators: 0u64, num_shards });
+            return Err(EpochError::NotEnoughValidators {
+                num_validators: 0u64,
+                num_shards: shard_ids.len() as NumShards,
+            });
         }
         let mut id = 0usize;
         // Here we assign validators to chunks (we try to keep number of shards assigned for
         // each validator as even as possible). Note that in prod configuration number of seats
         // per shard is the same as maximal number of block producers, so normally all
         // validators would be assigned to all chunks
-        (0usize..(num_shards as usize))
+        shard_ids
+            .iter()
+            .map(|&shard_id| shard_id as usize)
             .map(|shard_id| {
                 (0..epoch_config.num_block_producer_seats_per_shard[shard_id]
                     .min(block_producers_settlement.len() as u64))
@@ -179,7 +186,7 @@ pub fn proposals_to_epoch_info(
     // TODO(#10014) determine `min_mandates_per_shard`
     let min_mandates_per_shard = 0;
     let validator_mandates_config =
-        ValidatorMandatesConfig::new(threshold, min_mandates_per_shard, num_shards as usize);
+        ValidatorMandatesConfig::new(threshold, min_mandates_per_shard, shard_ids.len());
     // We can use `all_validators` to construct mandates Since a validator's position in
     // `all_validators` corresponds to its `ValidatorId`
     ValidatorMandates::new(validator_mandates_config, &all_validators)
diff --git a/core/primitives/src/shard_layout.rs b/core/primitives/src/shard_layout.rs
index cce0b8bf031..b46ec390e80 100644
--- a/core/primitives/src/shard_layout.rs
+++ b/core/primitives/src/shard_layout.rs
@@ -194,7 +194,7 @@ impl ShardLayout {
     /// Returns error if `shard_id` is an invalid shard id in the current layout
     /// Panics if `self` has no parent shard layout
     pub fn get_parent_shard_id(&self, shard_id: ShardId) -> Result<ShardId, ShardLayoutError> {
-        if shard_id > self.num_shards() {
+        if !self.shard_ids().any(|id| id == shard_id) {
             return Err(ShardLayoutError::InvalidShardIdError { shard_id });
         }
         let parent_shard_id = match self {
@@ -225,6 +225,10 @@ impl ShardLayout {
         }
     }
 
+    pub fn shard_ids(&self) -> impl Iterator<Item = ShardId> {
+        0..self.num_shards()
+    }
+
     /// Returns shard uids for all shards in the shard layout
     pub fn get_shard_uids(&self) -> Vec<ShardUId> {
         (0..self.num_shards()).map(|x| ShardUId::from_shard_id_and_layout(x, self)).collect()
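
Note that the `get_parent_shard_id` hunk above is a small behavioral fix as well as a refactor: the old check `shard_id > self.num_shards()` accepted `shard_id == num_shards`, which is out of range, while the membership test over `shard_ids()` rejects it. A self-contained sketch of the two predicates, using a plain `num_shards: u64` as a stand-in for the real layout:

```rust
// Compares the old and new bound checks from `get_parent_shard_id`.
// `num_shards` stands in for `self.num_shards()`; valid ids are 0..num_shards.
fn main() {
    let num_shards: u64 = 4;
    let shard_ids = || 0..num_shards; // mirrors the new `shard_ids()` helper

    for shard_id in 0..=5u64 {
        let old_invalid = shard_id > num_shards; // misses shard_id == 4
        let new_invalid = !shard_ids().any(|id| id == shard_id);
        println!("shard_id={shard_id}: old_invalid={old_invalid} new_invalid={new_invalid}");
    }

    // shard_id == 4 is out of range, but only the new check flags it.
    assert!(!(4u64 > num_shards));
    assert!(!shard_ids().any(|id| id == 4));
}
```
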
diff --git a/core/store/src/cold_storage.rs b/core/store/src/cold_storage.rs
index f3d8066d1b6..0b21540d107 100644
--- a/core/store/src/cold_storage.rs
+++ b/core/store/src/cold_storage.rs
@@ -281,9 +281,10 @@ fn get_keys_from_store(
             DBKeyType::PreviousBlockHash => {
                 vec![block.header().prev_hash().as_bytes().to_vec()]
             }
-            DBKeyType::ShardId => {
-                (0..shard_layout.num_shards()).map(|si| si.to_le_bytes().to_vec()).collect()
-            }
+            DBKeyType::ShardId => shard_layout
+                .shard_ids()
+                .map(|shard_id| shard_id.to_le_bytes().to_vec())
+                .collect(),
             DBKeyType::ShardUId => shard_layout
                 .get_shard_uids()
                 .iter()
@@ -345,7 +346,8 @@ fn get_keys_from_store(
                         DBCol::OutcomeIds.key_type(),
                         &[DBKeyType::BlockHash, DBKeyType::ShardId]
                     );
-                    (0..shard_layout.num_shards())
+                    shard_layout
+                        .shard_ids()
                         .map(|shard_id| {
                             store.get_ser(
                                 DBCol::OutcomeIds,
diff --git a/core/store/src/genesis/initialization.rs b/core/store/src/genesis/initialization.rs
index 62e3b14a87c..9bff66884d3 100644
--- a/core/store/src/genesis/initialization.rs
+++ b/core/store/src/genesis/initialization.rs
@@ -99,9 +99,9 @@ fn genesis_state_from_genesis(store: Store, genesis: &Genesis) -> Vec<StateRoot>
     let storage_usage_config = &runtime_config.fees.storage_usage_config;
     let initial_epoch_config = EpochConfig::from(&genesis.config);
     let shard_layout = initial_epoch_config.shard_layout;
-    let num_shards = shard_layout.num_shards();
+    let shard_ids: Vec<_> = shard_layout.shard_ids().collect();
     let mut shard_account_ids: Vec<HashSet<AccountId>> =
-        (0..num_shards).map(|_| HashSet::new()).collect();
+        shard_ids.iter().map(|_| HashSet::new()).collect();
     let mut has_protocol_account = false;
 
     info!(target: "store","distributing records to shards");
@@ -124,7 +124,7 @@ fn genesis_state_from_genesis(store: Store, genesis: &Genesis) -> Vec<StateRoot>
     );
     let writers = std::sync::atomic::AtomicUsize::new(0);
-    (0..num_shards)
+    shard_ids
         .into_par_iter()
         .map(|shard_id| {
             let validators = genesis
diff --git a/genesis-tools/genesis-csv-to-json/src/csv_to_json_configs.rs b/genesis-tools/genesis-csv-to-json/src/csv_to_json_configs.rs
index 595f5514c38..0cd222d74e6 100644
--- a/genesis-tools/genesis-csv-to-json/src/csv_to_json_configs.rs
+++ b/genesis-tools/genesis-csv-to-json/src/csv_to_json_configs.rs
@@ -38,7 +38,7 @@ pub fn csv_to_json_configs(home: &Path, chain_id: String, tracked_shards: Vec<ShardId>) {
-    if tracked_shards.iter().any(|shard_id| *shard_id >= NUM_SHARDS) {
+    if tracked_shards.iter().any(|&shard_id| shard_id >= NUM_SHARDS) {
         panic!("Trying to track a shard that does not exist");
     }
diff --git a/genesis-tools/genesis-populate/src/lib.rs b/genesis-tools/genesis-populate/src/lib.rs
index eb41cfdbbce..2f0a561eabd 100644
--- a/genesis-tools/genesis-populate/src/lib.rs
+++ b/genesis-tools/genesis-populate/src/lib.rs
@@ -142,8 +142,8 @@ impl GenesisBuilder {
         self.unflushed_records =
             self.roots.keys().cloned().map(|shard_idx| (shard_idx, vec![])).collect();
-        let num_shards = self.genesis.config.shard_layout.num_shards();
-        let total_accounts_num = self.additional_accounts_num * num_shards;
+        let shard_ids: Vec<_> = self.genesis.config.shard_layout.shard_ids().collect();
+        let total_accounts_num = self.additional_accounts_num * shard_ids.len() as u64;
         let bar = ProgressBar::new(total_accounts_num as _);
         bar.set_style(ProgressStyle::default_bar().template(
             "[elapsed {elapsed_precise} remaining {eta_precise}] Writing into storage {bar} {pos:>7}/{len:7}",
@@ -155,7 +155,7 @@ impl GenesisBuilder {
             bar.inc(1);
         }
 
-        for shard_id in 0..num_shards {
+        for shard_id in shard_ids {
             self.flush_shard_records(shard_id)?;
         }
         bar.finish();
diff --git a/integration-tests/src/tests/client/state_dump.rs b/integration-tests/src/tests/client/state_dump.rs
index 37b4ad2f209..bc07a7e146d 100644
--- a/integration-tests/src/tests/client/state_dump.rs
+++ b/integration-tests/src/tests/client/state_dump.rs
@@ -84,10 +84,10 @@ fn test_state_dump() {
         wait_or_timeout(100, 10000, || async {
             let mut all_parts_present = true;
 
-            let num_shards = epoch_manager.num_shards(&epoch_id).unwrap();
-            assert_ne!(num_shards, 0);
+            let shard_ids = epoch_manager.shard_ids(&epoch_id).unwrap();
+            assert_ne!(shard_ids.len(), 0);
 
-            for shard_id in 0..num_shards {
+            for shard_id in shard_ids {
                 let num_parts = 1;
                 for part_id in 0..num_parts {
                     let path = root_dir.path().join(external_storage_location(
@@ -258,10 +258,10 @@ fn run_state_sync_with_dumped_parts(
         wait_or_timeout(100, 10000, || async {
             let mut all_parts_present = true;
 
-            let num_shards = epoch_manager.num_shards(&epoch_id).unwrap();
-            assert_ne!(num_shards, 0);
+            let shard_ids = epoch_manager.shard_ids(&epoch_id).unwrap();
+            assert_ne!(shard_ids.len(), 0);
 
-            for shard_id in 0..num_shards {
+            for shard_id in shard_ids {
                 for part_id in 0..num_parts {
                     let path = root_dir.path().join(external_storage_location(
                         &config.chain_id,
diff --git a/nearcore/src/runtime/mod.rs b/nearcore/src/runtime/mod.rs
index b06b7ad57d1..a68115b957f 100644
--- a/nearcore/src/runtime/mod.rs
+++ b/nearcore/src/runtime/mod.rs
@@ -1345,8 +1345,7 @@ mod test {
     use near_primitives::challenge::SlashedValidator;
     use near_primitives::transaction::{Action, DeleteAccountAction, StakeAction, TransferAction};
     use near_primitives::types::{
-        BlockHeightDelta, Nonce, NumShards, ValidatorId, ValidatorInfoIdentifier,
-        ValidatorKickoutReason,
+        BlockHeightDelta, Nonce, ValidatorId, ValidatorInfoIdentifier, ValidatorKickoutReason,
     };
     use near_primitives::validator_signer::ValidatorSigner;
     use near_primitives::views::{
@@ -1583,30 +1582,32 @@ mod test {
             challenges_result: ChallengesResult,
         ) {
             let new_hash = hash(&[(self.head.height + 1) as u8]);
-            let num_shards = self.epoch_manager.num_shards(&self.head.epoch_id).unwrap();
-            assert_eq!(transactions.len() as NumShards, num_shards);
-            assert_eq!(chunk_mask.len() as NumShards, num_shards);
+            let shard_ids = self.epoch_manager.shard_ids(&self.head.epoch_id).unwrap();
+            assert_eq!(transactions.len(), shard_ids.len());
+            assert_eq!(chunk_mask.len(), shard_ids.len());
             let mut all_proposals = vec![];
             let mut all_receipts = vec![];
-            for i in 0..num_shards {
+            for shard_id in shard_ids {
                 let (state_root, proposals, receipts) = self.runtime.update(
-                    &self.state_roots[i as usize],
-                    i,
+                    &self.state_roots[shard_id as usize],
+                    shard_id,
                     self.head.height + 1,
                     0,
                     &self.head.last_block_hash,
                     &new_hash,
-                    self.last_receipts.get(&i).map_or(&[], |v| v.as_slice()),
-                    &transactions[i as usize],
-                    ValidatorStakeIter::new(self.last_shard_proposals.get(&i).unwrap_or(&vec![])),
+                    self.last_receipts.get(&shard_id).map_or(&[], |v| v.as_slice()),
+                    &transactions[shard_id as usize],
+                    ValidatorStakeIter::new(
+                        self.last_shard_proposals.get(&shard_id).unwrap_or(&vec![]),
+                    ),
                     self.runtime.genesis_config.min_gas_price,
                     u64::max_value(),
                     &challenges_result,
                 );
-                self.state_roots[i as usize] = state_root;
+                self.state_roots[shard_id as usize] = state_root;
                 all_receipts.extend(receipts);
                 all_proposals.append(&mut proposals.clone());
-                self.last_shard_proposals.insert(i as ShardId, proposals);
+                self.last_shard_proposals.insert(shard_id, proposals);
             }
             self.epoch_manager
                 .add_validator_proposals(BlockHeaderInfo {
diff --git a/nearcore/src/state_sync.rs b/nearcore/src/state_sync.rs
index 123d1ad0f9a..b6af75d46bb 100644
--- a/nearcore/src/state_sync.rs
+++ b/nearcore/src/state_sync.rs
@@ -66,7 +66,7 @@ pub fn spawn_state_sync_dump(
 
     // Determine how many threads to start.
     // TODO: Handle the case of changing the shard layout.
-    let num_shards = {
+    let shard_ids = {
         // Sadly, `Chain` is not `Send` and each thread needs to create its own `Chain` instance.
         let chain = Chain::new_for_view_client(
             epoch_manager.clone(),
@@ -77,13 +77,14 @@ pub fn spawn_state_sync_dump(
             false,
         )?;
         let epoch_id = chain.head()?.epoch_id;
-        epoch_manager.num_shards(&epoch_id)
+        epoch_manager.shard_ids(&epoch_id)
     }?;
 
     let chain_id = client_config.chain_id.clone();
     let keep_running = Arc::new(AtomicBool::new(true));
     // Start a thread for each shard.
-    let handles = (0..num_shards as usize)
+    let handles = shard_ids
+        .into_iter()
         .map(|shard_id| {
             let runtime = runtime.clone();
             let chain_genesis = chain_genesis.clone();
diff --git a/tools/amend-genesis/src/lib.rs b/tools/amend-genesis/src/lib.rs
index ae6373e25d8..0fcf9d62af0 100644
--- a/tools/amend-genesis/src/lib.rs
+++ b/tools/amend-genesis/src/lib.rs
@@ -350,12 +350,14 @@ pub fn amend_genesis(
     if let Some(n) = genesis_changes.num_seats {
         genesis.config.num_block_producer_seats = n;
     }
-    if let Some(l) = shard_layout {
+    if let Some(shard_layout) = shard_layout {
         genesis.config.avg_hidden_validator_seats_per_shard =
-            (0..l.num_shards()).map(|_| 0).collect();
-        genesis.config.num_block_producer_seats_per_shard =
-            utils::get_num_seats_per_shard(l.num_shards(), genesis.config.num_block_producer_seats);
-        genesis.config.shard_layout = l;
+            shard_layout.shard_ids().into_iter().map(|_| 0).collect();
+        genesis.config.num_block_producer_seats_per_shard = utils::get_num_seats_per_shard(
+            shard_layout.num_shards(),
+            genesis.config.num_block_producer_seats,
+        );
+        genesis.config.shard_layout = shard_layout;
     }
     if let Some(v) = genesis_changes.protocol_version {
         genesis.config.protocol_version = v;
diff --git a/tools/mock-node/src/main.rs b/tools/mock-node/src/main.rs
index 1d18afeb652..f53300fffbe 100644
--- a/tools/mock-node/src/main.rs
+++ b/tools/mock-node/src/main.rs
@@ -112,7 +112,7 @@ fn main() -> anyhow::Result<()> {
     let signer = InMemorySigner::from_random("mock_node".parse().unwrap(), KeyType::ED25519);
     near_config.network_config.node_key = signer.secret_key;
     near_config.client_config.tracked_shards =
-        (0..near_config.genesis.config.shard_layout.num_shards()).collect();
+        near_config.genesis.config.shard_layout.shard_ids().collect();
     if near_config.rpc_config.is_none() {
         near_config.rpc_config = Some(near_jsonrpc::RpcConfig::default());
     }
diff --git a/tools/state-viewer/src/epoch_info.rs b/tools/state-viewer/src/epoch_info.rs
index 2da6dfcc45f..ca2c083009b 100644
--- a/tools/state-viewer/src/epoch_info.rs
+++ b/tools/state-viewer/src/epoch_info.rs
@@ -89,12 +89,13 @@ fn display_block_and_chunk_producers(
 ) -> anyhow::Result<()> {
     let block_height_range: Range<BlockHeight> =
         get_block_height_range(epoch_id, chain_store, epoch_manager)?;
-    let num_shards = epoch_manager.num_shards(epoch_id).unwrap();
+    let shard_ids = epoch_manager.shard_ids(epoch_id).unwrap();
     for block_height in block_height_range {
         let bp = epoch_info.sample_block_producer(block_height);
         let bp = epoch_info.get_validator(bp).account_id().clone();
-        let cps: Vec<String> = (0..num_shards)
-            .map(|shard_id| {
+        let cps: Vec<String> = shard_ids
+            .iter()
+            .map(|&shard_id| {
                 let cp = epoch_info.sample_chunk_producer(block_height, shard_id);
                 let cp = epoch_info.get_validator(cp).account_id().clone();
                 cp.as_str().to_string()
@@ -272,12 +273,12 @@ fn display_validator_info(
         .collect();
     println!("Block producer for {} blocks: {bp_for_blocks:?}", bp_for_blocks.len());
 
-    let shard_ids = 0..epoch_manager.num_shards(epoch_id).unwrap();
+    let shard_ids = epoch_manager.shard_ids(epoch_id).unwrap();
     let cp_for_chunks: Vec<(BlockHeight, ShardId)> = block_height_range
         .flat_map(|block_height| {
             shard_ids
-                .clone()
-                .map(|shard_id| (block_height, shard_id))
+                .iter()
+                .map(|&shard_id| (block_height, shard_id))
                 .filter(|&(block_height, shard_id)| {
                     epoch_info.sample_chunk_producer(block_height, shard_id) == *validator_id
                 })
diff --git a/tools/state-viewer/src/state_changes.rs b/tools/state-viewer/src/state_changes.rs
index 4ec39df8877..bbdf8b6b450 100644
--- a/tools/state-viewer/src/state_changes.rs
+++ b/tools/state-viewer/src/state_changes.rs
@@ -83,7 +83,8 @@ fn dump_state_changes(
     let block_hash = block_header.hash();
     let epoch_id = block_header.epoch_id();
     let key = KeyForStateChanges::for_block(block_header.hash());
-    let mut state_changes_per_shard = vec![vec![];epoch_manager.num_shards(epoch_id).unwrap() as usize];
+    let mut state_changes_per_shard: Vec<_> =
+        epoch_manager.shard_ids(epoch_id).unwrap().into_iter().map(|_| vec![]).collect();
 
     for row in key.find_rows_iter(&store) {
         let (key, value) = row.unwrap();
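
Taken together, the refactor is mechanical: every `0..num_shards` loop becomes iteration over the ids that the epoch manager or shard layout hands out, so call sites stop hard-coding the assumption that shard ids form the contiguous range `0..n`. A minimal standalone sketch of the new adapter surface; `ShardId`, `NumShards`, `EpochId`, `EpochError`, and the mock below are simplified stand-ins patterned after the `MockEpochManager` impl in this diff, not the real nearcore definitions:

```rust
// Standalone sketch: `shard_ids` alongside the existing `num_shards`.
type ShardId = u64;
type NumShards = u64;

#[derive(Debug)]
struct EpochError;

struct EpochId;

trait EpochManagerAdapter {
    fn num_shards(&self, epoch_id: &EpochId) -> Result<NumShards, EpochError>;
    fn shard_ids(&self, epoch_id: &EpochId) -> Result<Vec<ShardId>, EpochError>;
}

struct MockEpochManager {
    num_shards: NumShards,
}

impl EpochManagerAdapter for MockEpochManager {
    fn num_shards(&self, _epoch_id: &EpochId) -> Result<NumShards, EpochError> {
        Ok(self.num_shards)
    }

    // Same shape as the diff: for current layouts the ids are just 0..n.
    fn shard_ids(&self, _epoch_id: &EpochId) -> Result<Vec<ShardId>, EpochError> {
        Ok((0..self.num_shards).collect())
    }
}

fn main() -> Result<(), EpochError> {
    let epoch_manager = MockEpochManager { num_shards: 4 };
    let epoch_id = EpochId;

    // Call sites now iterate the ids the manager hands out instead of
    // reconstructing the range themselves.
    for shard_id in epoch_manager.shard_ids(&epoch_id)? {
        println!("processing shard {shard_id}");
    }
    Ok(())
}
```

As the `ShardLayout::shard_ids` hunk shows, today this is equivalent to iterating `0..num_shards`, but callers written against `shard_ids()` keep working if a future shard layout assigns non-contiguous ids.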