diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 36d5433d6dd99e..5c9768aa0bc215 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -33,7 +33,7 @@ use { solana_client::connection_cache::ConnectionCache, solana_gossip::cluster_info::ClusterInfo, solana_ledger::blockstore_processor::TransactionStatusSender, - solana_measure::{measure_time, measure_us}, + solana_measure::measure_us, solana_perf::{data_budget::DataBudget, packet::PACKETS_PER_BATCH}, solana_poh::poh_recorder::{PohRecorder, TransactionRecorder}, solana_runtime::{ @@ -676,13 +676,13 @@ impl BankingStage { if unprocessed_transaction_storage.should_not_process() { return; } - let (decision, make_decision_time) = - measure_time!(decision_maker.make_consume_or_forward_decision()); + let (decision, make_decision_us) = + measure_us!(decision_maker.make_consume_or_forward_decision()); let metrics_action = slot_metrics_tracker.check_leader_slot_boundary( decision.bank_start(), Some(unprocessed_transaction_storage), ); - slot_metrics_tracker.increment_make_decision_us(make_decision_time.as_us()); + slot_metrics_tracker.increment_make_decision_us(make_decision_us); match decision { BufferedPacketsDecision::Consume(bank_start) => { @@ -691,17 +691,15 @@ impl BankingStage { // packet processing metrics from the next slot towards the metrics // of the previous slot slot_metrics_tracker.apply_action(metrics_action); - let (_, consume_buffered_packets_time) = measure_time!( - consumer.consume_buffered_packets( + let (_, consume_buffered_packets_us) = measure_us!(consumer + .consume_buffered_packets( &bank_start, unprocessed_transaction_storage, banking_stage_stats, slot_metrics_tracker, - ), - "consume_buffered_packets", - ); + )); slot_metrics_tracker - .increment_consume_buffered_packets_us(consume_buffered_packets_time.as_us()); + .increment_consume_buffered_packets_us(consume_buffered_packets_us); } BufferedPacketsDecision::Forward => { let ((), forward_us) = 
measure_us!(forwarder.handle_forwarding( @@ -750,20 +748,17 @@ impl BankingStage { if !unprocessed_transaction_storage.is_empty() || last_metrics_update.elapsed() >= SLOT_BOUNDARY_CHECK_PERIOD { - let (_, process_buffered_packets_time) = measure_time!( - Self::process_buffered_packets( - decision_maker, - forwarder, - consumer, - &mut unprocessed_transaction_storage, - &banking_stage_stats, - &mut slot_metrics_tracker, - &mut tracer_packet_stats, - ), - "process_buffered_packets", - ); + let (_, process_buffered_packets_us) = measure_us!(Self::process_buffered_packets( + decision_maker, + forwarder, + consumer, + &mut unprocessed_transaction_storage, + &banking_stage_stats, + &mut slot_metrics_tracker, + &mut tracer_packet_stats, + )); slot_metrics_tracker - .increment_process_buffered_packets_us(process_buffered_packets_time.as_us()); + .increment_process_buffered_packets_us(process_buffered_packets_us); last_metrics_update = Instant::now(); } diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 45ac6872c8f2f4..ce6638016433c6 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -17,7 +17,7 @@ use { }, itertools::Itertools, min_max_heap::MinMaxHeap, - solana_measure::{measure_time, measure_us}, + solana_measure::measure_us, solana_runtime::bank::Bank, solana_sdk::{ clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, feature_set::FeatureSet, hash::Hash, @@ -636,35 +636,24 @@ impl ThreadLocalUnprocessedPackets { if accepting_packets { let ( (sanitized_transactions, transaction_to_packet_indexes), - packet_conversion_time, - ): ( - (Vec, Vec), - _, - ) = measure_time!( - self.sanitize_unforwarded_packets( - &packets_to_forward, - &bank, - &mut total_dropped_packets - ), - "sanitize_packet", - ); + packet_conversion_us, + ) = measure_us!(self.sanitize_unforwarded_packets( + &packets_to_forward, + 
&bank, + &mut total_dropped_packets + )); saturating_add_assign!( total_packet_conversion_us, - packet_conversion_time.as_us() + packet_conversion_us ); - let (forwardable_transaction_indexes, filter_packets_time) = measure_time!( - Self::filter_invalid_transactions( + let (forwardable_transaction_indexes, filter_packets_us) = + measure_us!(Self::filter_invalid_transactions( &sanitized_transactions, &bank, &mut total_dropped_packets - ), - "filter_packets", - ); - saturating_add_assign!( - total_filter_packets_us, - filter_packets_time.as_us() - ); + )); + saturating_add_assign!(total_filter_packets_us, filter_packets_us); for forwardable_transaction_index in &forwardable_transaction_indexes { saturating_add_assign!(total_forwardable_packets, 1); diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 209c7923b59672..bdcf82febb59de 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -27,7 +27,7 @@ use { solana_entry::entry::{ self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers, }, - solana_measure::{measure::Measure, measure_time}, + solana_measure::{measure::Measure, measure_us}, solana_metrics::datapoint_error, solana_rayon_threadlimit::{get_max_thread_count, get_thread_count}, solana_runtime::{ @@ -187,24 +187,23 @@ pub fn execute_batch( .. 
} = tx_results; - let (check_block_cost_limits_result, check_block_cost_limits_time): (Result<()>, Measure) = - measure_time!(if bank - .feature_set - .is_active(&feature_set::apply_cost_tracker_during_replay::id()) - { - check_block_cost_limits( - bank, - &loaded_accounts_stats, - &execution_results, - batch.sanitized_transactions(), - ) - } else { - Ok(()) - }); + let (check_block_cost_limits_result, check_block_cost_limits_us) = measure_us!(if bank + .feature_set + .is_active(&feature_set::apply_cost_tracker_during_replay::id()) + { + check_block_cost_limits( + bank, + &loaded_accounts_stats, + &execution_results, + batch.sanitized_transactions(), + ) + } else { + Ok(()) + }); timings.saturating_add_in_place( ExecuteTimingType::CheckBlockLimitsUs, - check_block_cost_limits_time.as_us(), + check_block_cost_limits_us, ); check_block_cost_limits_result?; @@ -328,20 +327,15 @@ fn execute_batches_internal( let transaction_count = transaction_batch.batch.sanitized_transactions().len() as u64; let mut timings = ExecuteTimings::default(); - let (result, execute_batches_time): (Result<()>, Measure) = measure_time!( - { - execute_batch( - transaction_batch, - bank, - transaction_status_sender, - replay_vote_sender, - &mut timings, - log_messages_bytes_limit, - prioritization_fee_cache, - ) - }, - "execute_batch", - ); + let (result, execute_batches_us) = measure_us!(execute_batch( + transaction_batch, + bank, + transaction_status_sender, + replay_vote_sender, + &mut timings, + log_messages_bytes_limit, + prioritization_fee_cache, + )); let thread_index = replay_tx_thread_pool.current_thread_index().unwrap(); execution_timings_per_thread @@ -354,14 +348,14 @@ fn execute_batches_internal( total_transactions_executed, execute_timings: total_thread_execute_timings, } = thread_execution_time; - *total_thread_us += execute_batches_time.as_us(); + *total_thread_us += execute_batches_us; *total_transactions_executed += transaction_count; total_thread_execute_timings 
.saturating_add_in_place(ExecuteTimingType::TotalBatchesLen, 1); total_thread_execute_timings.accumulate(&timings); }) .or_insert(ThreadExecuteTimings { - total_thread_us: execute_batches_time.as_us(), + total_thread_us: execute_batches_us, total_transactions_executed: transaction_count, execute_timings: timings, }); diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index fba9f4d71f1b6c..0934fe8de512d5 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -21,7 +21,7 @@ use { poh::Poh, }, solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache}, - solana_measure::{measure_time, measure_us}, + solana_measure::measure_us, solana_metrics::poh_timing_point::{send_poh_timing_point, PohTimingSender, SlotPohTimingInfo}, solana_runtime::{bank::Bank, installed_scheduler_pool::BankWithScheduler}, solana_sdk::{ @@ -848,20 +848,17 @@ impl PohRecorder { } pub fn tick(&mut self) { - let ((poh_entry, target_time), tick_lock_contention_time) = measure_time!( - { - let mut poh_l = self.poh.lock().unwrap(); - let poh_entry = poh_l.tick(); - let target_time = if poh_entry.is_some() { - Some(poh_l.target_poh_time(self.target_ns_per_tick)) - } else { - None - }; - (poh_entry, target_time) - }, - "tick_lock_contention", - ); - self.tick_lock_contention_us += tick_lock_contention_time.as_us(); + let ((poh_entry, target_time), tick_lock_contention_us) = measure_us!({ + let mut poh_l = self.poh.lock().unwrap(); + let poh_entry = poh_l.tick(); + let target_time = if poh_entry.is_some() { + Some(poh_l.target_poh_time(self.target_ns_per_tick)) + } else { + None + }; + (poh_entry, target_time) + }); + self.tick_lock_contention_us += tick_lock_contention_us; if let Some(poh_entry) = poh_entry { self.tick_height += 1; @@ -884,24 +881,19 @@ impl PohRecorder { self.tick_height, )); - let (_flush_res, flush_cache_and_tick_time) = - measure_time!(self.flush_cache(true), "flush_cache_and_tick"); - self.flush_cache_tick_us += 
flush_cache_and_tick_time.as_us(); - - let sleep_time = measure_time!( - { - let target_time = target_time.unwrap(); - // sleep is not accurate enough to get a predictable time. - // Kernel can not schedule the thread for a while. - while Instant::now() < target_time { - // TODO: a caller could possibly desire to reset or record while we're spinning here - std::hint::spin_loop(); - } - }, - "poh_sleep", - ) - .1; - self.total_sleep_us += sleep_time.as_us(); + let (_flush_res, flush_cache_and_tick_us) = measure_us!(self.flush_cache(true)); + self.flush_cache_tick_us += flush_cache_and_tick_us; + + let (_, sleep_us) = measure_us!({ + let target_time = target_time.unwrap(); + // sleep is not accurate enough to get a predictable time. + // Kernel can not schedule the thread for a while. + while Instant::now() < target_time { + // TODO: a caller could possibly desire to reset or record while we're spinning here + std::hint::spin_loop(); + } + }); + self.total_sleep_us += sleep_us; } } @@ -949,14 +941,12 @@ impl PohRecorder { // cannot be generated by `record()` assert!(!transactions.is_empty(), "No transactions provided"); - let ((), report_metrics_time) = - measure_time!(self.report_metrics(bank_slot), "report_metrics"); - self.report_metrics_us += report_metrics_time.as_us(); + let ((), report_metrics_us) = measure_us!(self.report_metrics(bank_slot)); + self.report_metrics_us += report_metrics_us; loop { - let (flush_cache_res, flush_cache_time) = - measure_time!(self.flush_cache(false), "flush_cache"); - self.flush_cache_no_tick_us += flush_cache_time.as_us(); + let (flush_cache_res, flush_cache_us) = measure_us!(self.flush_cache(false)); + self.flush_cache_no_tick_us += flush_cache_us; flush_cache_res?; let working_bank = self @@ -967,30 +957,26 @@ impl PohRecorder { return Err(PohRecorderError::MaxHeightReached); } - let (mut poh_lock, poh_lock_time) = measure_time!(self.poh.lock().unwrap(), "poh_lock"); - self.record_lock_contention_us += poh_lock_time.as_us(); + 
let (mut poh_lock, poh_lock_us) = measure_us!(self.poh.lock().unwrap()); + self.record_lock_contention_us += poh_lock_us; - let (record_mixin_res, record_mixin_time) = - measure_time!(poh_lock.record(mixin), "record_mixin"); - self.record_us += record_mixin_time.as_us(); + let (record_mixin_res, record_mixin_us) = measure_us!(poh_lock.record(mixin)); + self.record_us += record_mixin_us; drop(poh_lock); if let Some(poh_entry) = record_mixin_res { let num_transactions = transactions.len(); - let (send_entry_res, send_entry_time) = measure_time!( - { - let entry = Entry { - num_hashes: poh_entry.num_hashes, - hash: poh_entry.hash, - transactions, - }; - let bank_clone = working_bank.bank.clone(); - self.sender.send((bank_clone, (entry, self.tick_height))) - }, - "send_poh_entry", - ); - self.send_entry_us += send_entry_time.as_us(); + let (send_entry_res, send_entry_us) = measure_us!({ + let entry = Entry { + num_hashes: poh_entry.num_hashes, + hash: poh_entry.hash, + transactions, + }; + let bank_clone = working_bank.bank.clone(); + self.sender.send((bank_clone, (entry, self.tick_height))) + }); + self.send_entry_us += send_entry_us; send_entry_res?; let starting_transaction_index = working_bank.transaction_index.map(|transaction_index| { diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index b84ade297b1ac8..ed05b766c47ed5 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -5,7 +5,7 @@ use { crossbeam_channel::Receiver, log::*, solana_entry::poh::Poh, - solana_measure::{measure::Measure, measure_time}, + solana_measure::{measure::Measure, measure_us}, solana_sdk::poh_config::PohConfig, std::{ sync::{ @@ -261,9 +261,8 @@ impl PohService { std::mem::take(&mut record.transactions), ); // what do we do on failure here? Ignore for now. 
- let (_send_res, send_record_result_time) = - measure_time!(record.sender.send(res), "send_record_result"); - timing.total_send_record_result_us += send_record_result_time.as_us(); + let (_send_res, send_record_result_us) = measure_us!(record.sender.send(res)); + timing.total_send_record_result_us += send_record_result_us; timing.num_hashes += 1; // note: may have also ticked inside record let new_record_result = record_receiver.try_recv(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e0396c756ce2a2..7898831773ad18 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1355,74 +1355,60 @@ impl Bank { ) { let epoch = self.epoch(); let slot = self.slot(); - let (thread_pool, thread_pool_time) = measure_time!( - ThreadPoolBuilder::new() - .thread_name(|i| format!("solBnkNewEpch{i:02}")) - .build() - .expect("new rayon threadpool"), - "thread_pool_creation", - ); + let (thread_pool, thread_pool_time_us) = measure_us!(ThreadPoolBuilder::new() + .thread_name(|i| format!("solBnkNewEpch{i:02}")) + .build() + .expect("new rayon threadpool")); - let (_, apply_feature_activations_time) = measure_time!( - self.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false), - "apply_feature_activation", + let (_, apply_feature_activations_time_us) = measure_us!( + self.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false) ); // Add new entry to stakes.stake_history, set appropriate epoch and // update vote accounts with warmed up stakes before saving a // snapshot of stakes in epoch stakes - let (_, activate_epoch_time) = measure_time!( - self.stakes_cache.activate_epoch( - epoch, - &thread_pool, - self.new_warmup_cooldown_rate_epoch() - ), - "activate_epoch", - ); + let (_, activate_epoch_time_us) = measure_us!(self.stakes_cache.activate_epoch( + epoch, + &thread_pool, + self.new_warmup_cooldown_rate_epoch() + )); // Save a snapshot of stakes for use in consensus and stake weighted networking let 
leader_schedule_epoch = self.epoch_schedule.get_leader_schedule_epoch(slot); - let (_, update_epoch_stakes_time) = measure_time!( - self.update_epoch_stakes(leader_schedule_epoch), - "update_epoch_stakes", - ); + let (_, update_epoch_stakes_time_us) = + measure_us!(self.update_epoch_stakes(leader_schedule_epoch)); let mut rewards_metrics = RewardsMetrics::default(); // After saving a snapshot of stakes, apply stake rewards and commission - let (_, update_rewards_with_thread_pool_time) = measure_time!( - { - if self.is_partitioned_rewards_code_enabled() { - self.begin_partitioned_rewards( - reward_calc_tracer, - &thread_pool, - parent_epoch, - parent_slot, - parent_height, - &mut rewards_metrics, - ); - } else { - self.update_rewards_with_thread_pool( - parent_epoch, - reward_calc_tracer, - &thread_pool, - &mut rewards_metrics, - ) - } - }, - "update_rewards_with_thread_pool", - ); + let (_, update_rewards_with_thread_pool_time_us) = + measure_us!(if self.is_partitioned_rewards_code_enabled() { + self.begin_partitioned_rewards( + reward_calc_tracer, + &thread_pool, + parent_epoch, + parent_slot, + parent_height, + &mut rewards_metrics, + ); + } else { + self.update_rewards_with_thread_pool( + parent_epoch, + reward_calc_tracer, + &thread_pool, + &mut rewards_metrics, + ) + }); report_new_epoch_metrics( epoch, slot, parent_slot, NewEpochTimings { - thread_pool_time_us: thread_pool_time.as_us(), - apply_feature_activations_time_us: apply_feature_activations_time.as_us(), - activate_epoch_time_us: activate_epoch_time.as_us(), - update_epoch_stakes_time_us: update_epoch_stakes_time.as_us(), - update_rewards_with_thread_pool_time_us: update_rewards_with_thread_pool_time - .as_us(), + thread_pool_time_us, + apply_feature_activations_time_us, + activate_epoch_time_us, + update_epoch_stakes_time_us, + update_rewards_with_thread_pool_time_us, }, rewards_metrics, ); @@ -2218,7 +2204,7 @@ impl Bank { solana_stake_program::get_minimum_delegation(&self.feature_set) 
.max(LAMPORTS_PER_SOL); - let (stake_delegations, filter_timer) = measure_time!(stakes + let (stake_delegations, filter_time_us) = measure_us!(stakes .stake_delegations() .iter() .filter(|(_stake_pubkey, cached_stake_account)| { @@ -2228,7 +2214,7 @@ impl Bank { datapoint_info!( "stake_account_filter_time", - ("filter_time_us", filter_timer.as_us(), i64), + ("filter_time_us", filter_time_us, i64), ("num_stake_delegations_before", num_stake_delegations, i64), ("num_stake_delegations_after", stake_delegations.len(), i64) ); @@ -2409,13 +2395,13 @@ impl Bank { invalid_vote_keys, vote_accounts_cache_miss_count, }, - measure, - ) = measure_time!({ + load_vote_and_stake_accounts_us, + ) = measure_us!({ self._load_vote_and_stake_accounts(thread_pool, reward_calc_tracer.as_ref()) }); metrics .load_vote_and_stake_accounts_us - .fetch_add(measure.as_us(), Relaxed); + .fetch_add(load_vote_and_stake_accounts_us, Relaxed); metrics.vote_accounts_cache_miss_count += vote_accounts_cache_miss_count; self.stakes_cache .handle_invalid_keys(invalid_vote_keys, self.slot()); @@ -2431,7 +2417,7 @@ impl Bank { metrics: &RewardsMetrics, ) -> Option { let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); - let (points, measure) = measure_time!(thread_pool.install(|| { + let (points, calculate_points_us) = measure_us!(thread_pool.install(|| { vote_with_stake_delegations_map .par_iter() .map(|entry| { @@ -2458,7 +2444,7 @@ impl Bank { })); metrics .calculate_points_us - .fetch_add(measure.as_us(), Relaxed); + .fetch_add(calculate_points_us, Relaxed); (points > 0).then_some(PointValue { rewards, points }) } @@ -2500,7 +2486,7 @@ impl Bank { }, ); - let (stake_rewards, measure) = measure_time!(thread_pool.install(|| { + let (stake_rewards, redeem_rewards_us) = measure_us!(thread_pool.install(|| { stake_delegation_iterator .filter_map(|(vote_pubkey, vote_state, (stake_pubkey, stake_account))| { // curry closure to add the contextual stake_pubkey @@ -2556,7 +2542,7 @@ impl 
Bank { }) .collect() })); - metrics.redeem_rewards_us += measure.as_us(); + metrics.redeem_rewards_us += redeem_rewards_us; (vote_account_rewards, stake_rewards) } @@ -2591,7 +2577,7 @@ impl Bank { vote_account_rewards: VoteRewards, metrics: &RewardsMetrics, ) -> Vec<(Pubkey, RewardInfo)> { - let (vote_rewards, measure) = measure_time!(vote_account_rewards + let (vote_rewards, store_vote_accounts_us) = measure_us!(vote_account_rewards .into_iter() .filter_map( |( @@ -2627,7 +2613,7 @@ impl Bank { metrics .store_vote_accounts_us - .fetch_add(measure.as_us(), Relaxed); + .fetch_add(store_vote_accounts_us, Relaxed); vote_rewards } @@ -4375,13 +4361,13 @@ impl Bank { .test_skip_rewrites_but_include_in_bank_hash; let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { - let (rent_collected_info, measure) = measure_time!(collect_rent_from_account( + let (rent_collected_info, collect_rent_us) = measure_us!(collect_rent_from_account( &self.feature_set, &self.rent_collector, pubkey, account )); - time_collecting_rent_us += measure.as_us(); + time_collecting_rent_us += collect_rent_us; // only store accounts where we collected rent // but get the hash for all these accounts even if collected rent is 0 (= not updated). @@ -4434,9 +4420,9 @@ impl Bank { if !accounts_to_store.is_empty() { // TODO: Maybe do not call `store_accounts()` here. Instead return `accounts_to_store` // and have `collect_rent_in_partition()` perform all the stores. - let (_, measure) = - measure_time!(self.store_accounts((self.slot(), &accounts_to_store[..],))); - time_storing_accounts_us += measure.as_us(); + let (_, store_accounts_us) = + measure_us!(self.store_accounts((self.slot(), &accounts_to_store[..]))); + time_storing_accounts_us += store_accounts_us; } CollectRentFromAccountsInfo { @@ -5476,7 +5462,7 @@ impl Bank { /// If the epoch accounts hash should be included in this Bank, then fetch it. 
If the EAH /// calculation has not completed yet, this fn will block until it does complete. fn wait_get_epoch_accounts_hash(&self) -> EpochAccountsHash { - let (epoch_accounts_hash, measure) = measure_time!(self + let (epoch_accounts_hash, waiting_time_us) = measure_us!(self .rc .accounts .accounts_db @@ -5485,8 +5471,8 @@ impl Bank { datapoint_info!( "bank-wait_get_epoch_accounts_hash", - ("slot", self.slot() as i64, i64), - ("waiting-time-us", measure.as_us() as i64, i64), + ("slot", self.slot(), i64), + ("waiting-time-us", waiting_time_us, i64), ); epoch_accounts_hash } @@ -6608,7 +6594,7 @@ impl Bank { return None; } - let (epoch_accounts_hash, measure) = measure_time!(self + let (epoch_accounts_hash, waiting_time_us) = measure_us!(self .rc .accounts .accounts_db @@ -6618,7 +6604,7 @@ impl Bank { datapoint_info!( "bank-get_epoch_accounts_hash_to_serialize", ("slot", self.slot(), i64), - ("waiting-time-us", measure.as_us(), i64), + ("waiting-time-us", waiting_time_us, i64), ); Some(epoch_accounts_hash) } diff --git a/runtime/src/prioritization_fee.rs b/runtime/src/prioritization_fee.rs index 72a7cac4056182..b0e3480c4a6569 100644 --- a/runtime/src/prioritization_fee.rs +++ b/runtime/src/prioritization_fee.rs @@ -1,5 +1,5 @@ use { - solana_measure::measure_time, + solana_measure::measure_us, solana_sdk::{clock::Slot, pubkey::Pubkey, saturating_add_assign}, std::collections::HashMap, }; @@ -166,35 +166,31 @@ impl Default for PrioritizationFee { impl PrioritizationFee { /// Update self for minimum transaction fee in the block and minimum fee for each writable account. 
pub fn update(&mut self, transaction_fee: u64, writable_accounts: Vec) { - let (_, update_time) = measure_time!( - { - if !self.is_finalized { - if transaction_fee < self.min_transaction_fee { - self.min_transaction_fee = transaction_fee; - } - - for write_account in writable_accounts { - self.min_writable_account_fees - .entry(write_account) - .and_modify(|write_lock_fee| { - *write_lock_fee = std::cmp::min(*write_lock_fee, transaction_fee) - }) - .or_insert(transaction_fee); - } - - self.metrics - .accumulate_total_prioritization_fee(transaction_fee); - self.metrics.update_prioritization_fee(transaction_fee); - } else { - self.metrics - .increment_attempted_update_on_finalized_fee_count(1); + let (_, update_us) = measure_us!({ + if !self.is_finalized { + if transaction_fee < self.min_transaction_fee { + self.min_transaction_fee = transaction_fee; } - }, - "update_time", - ); - self.metrics - .accumulate_total_update_elapsed_us(update_time.as_us()); + for write_account in writable_accounts { + self.min_writable_account_fees + .entry(write_account) + .and_modify(|write_lock_fee| { + *write_lock_fee = std::cmp::min(*write_lock_fee, transaction_fee) + }) + .or_insert(transaction_fee); + } + + self.metrics + .accumulate_total_prioritization_fee(transaction_fee); + self.metrics.update_prioritization_fee(transaction_fee); + } else { + self.metrics + .increment_attempted_update_on_finalized_fee_count(1); + } + }); + + self.metrics.accumulate_total_update_elapsed_us(update_us); } /// Accounts that have minimum fees lesser or equal to the minimum fee in the block are redundant, they are diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index bcbaf7e714adc6..2cc5489b1a054d 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -2,7 +2,7 @@ use { crate::{bank::Bank, compute_budget_details::GetComputeBudgetDetails, prioritization_fee::*}, crossbeam_channel::{unbounded, Receiver, 
Sender}, log::*, - solana_measure::measure_time, + solana_measure::measure_us, solana_sdk::{ clock::{BankId, Slot}, pubkey::Pubkey, @@ -193,59 +193,56 @@ impl PrioritizationFeeCache { /// transactions have both valid compute_budget_details and account_locks will be used to update /// fee_cache asynchronously. pub fn update<'a>(&self, bank: &Bank, txs: impl Iterator) { - let (_, send_updates_time) = measure_time!( - { - for sanitized_transaction in txs { - // Vote transactions are not prioritized, therefore they are excluded from - // updating fee_cache. - if sanitized_transaction.is_simple_vote_transaction() { - continue; - } - - let round_compute_unit_price_enabled = false; // TODO: bank.feture_set.is_active(round_compute_unit_price) - let compute_budget_details = sanitized_transaction - .get_compute_budget_details(round_compute_unit_price_enabled); - let account_locks = sanitized_transaction - .get_account_locks(bank.get_transaction_account_lock_limit()); + let (_, send_updates_us) = measure_us!({ + for sanitized_transaction in txs { + // Vote transactions are not prioritized, therefore they are excluded from + // updating fee_cache. 
+ if sanitized_transaction.is_simple_vote_transaction() { + continue; + } - if compute_budget_details.is_none() || account_locks.is_err() { - continue; - } - let compute_budget_details = compute_budget_details.unwrap(); + let round_compute_unit_price_enabled = false; // TODO: bank.feature_set.is_active(round_compute_unit_price) + let compute_budget_details = sanitized_transaction + .get_compute_budget_details(round_compute_unit_price_enabled); + let account_locks = sanitized_transaction + .get_account_locks(bank.get_transaction_account_lock_limit()); - // filter out any transaction that requests zero compute_unit_limit - // since its priority fee amount is not instructive - if compute_budget_details.compute_unit_limit == 0 { - continue; - } + if compute_budget_details.is_none() || account_locks.is_err() { + continue; + } + let compute_budget_details = compute_budget_details.unwrap(); - let writable_accounts = account_locks - .unwrap() - .writable - .iter() - .map(|key| **key) - .collect::>(); - - self.sender - .send(CacheServiceUpdate::TransactionUpdate { - slot: bank.slot(), - bank_id: bank.bank_id(), - transaction_fee: compute_budget_details.compute_unit_price, - writable_accounts, - }) - .unwrap_or_else(|err| { - warn!( - "prioritization fee cache transaction updates failed: {:?}", - err - ); - }); + // filter out any transaction that requests zero compute_unit_limit + // since its priority fee amount is not instructive + if compute_budget_details.compute_unit_limit == 0 { + continue; }
.accumulate_total_update_elapsed_us(send_updates_time.as_us()); + .accumulate_total_update_elapsed_us(send_updates_us); } /// Finalize prioritization fee when it's bank is completely replayed from blockstore, @@ -270,18 +267,13 @@ impl PrioritizationFeeCache { writable_accounts: Vec, metrics: &PrioritizationFeeCacheMetrics, ) { - let (_, entry_update_time) = measure_time!( - { - unfinalized - .entry(slot) - .or_default() - .entry(bank_id) - .or_default() - .update(transaction_fee, writable_accounts); - }, - "entry_update_time" - ); - metrics.accumulate_total_entry_update_elapsed_us(entry_update_time.as_us()); + let (_, entry_update_us) = measure_us!(unfinalized + .entry(slot) + .or_default() + .entry(bank_id) + .or_default() + .update(transaction_fee, writable_accounts)); + metrics.accumulate_total_entry_update_elapsed_us(entry_update_us); metrics.accumulate_successful_transaction_update_count(1); } @@ -300,57 +292,51 @@ impl PrioritizationFeeCache { // prune cache by evicting write account entry from prioritization fee if its fee is less // or equal to block's minimum transaction fee, because they are irrelevant in calculating // block minimum fee. 
- let (slot_prioritization_fee, slot_finalize_time) = measure_time!( - { - // remove unfinalized slots - *unfinalized = unfinalized - .split_off(&slot.checked_sub(MAX_UNFINALIZED_SLOTS).unwrap_or_default()); - - let Some(mut slot_prioritization_fee) = unfinalized.remove(&slot) else { - return; - }; - - // Only retain priority fee reported from optimistically confirmed bank - let pre_purge_bank_count = slot_prioritization_fee.len() as u64; - let mut prioritization_fee = slot_prioritization_fee.remove(&bank_id); - let post_purge_bank_count = prioritization_fee.as_ref().map(|_| 1).unwrap_or(0); - metrics.accumulate_total_purged_duplicated_bank_count( - pre_purge_bank_count.saturating_sub(post_purge_bank_count), - ); - // It should be rare that optimistically confirmed bank had no prioritized - // transactions, but duplicated and unconfirmed bank had. - if pre_purge_bank_count > 0 && post_purge_bank_count == 0 { - warn!("Finalized bank has empty prioritization fee cache. slot {slot} bank id {bank_id}"); - } + let (slot_prioritization_fee, slot_finalize_us) = measure_us!({ + // remove unfinalized slots + *unfinalized = + unfinalized.split_off(&slot.checked_sub(MAX_UNFINALIZED_SLOTS).unwrap_or_default()); + + let Some(mut slot_prioritization_fee) = unfinalized.remove(&slot) else { + return; + }; + + // Only retain priority fee reported from optimistically confirmed bank + let pre_purge_bank_count = slot_prioritization_fee.len() as u64; + let mut prioritization_fee = slot_prioritization_fee.remove(&bank_id); + let post_purge_bank_count = prioritization_fee.as_ref().map(|_| 1).unwrap_or(0); + metrics.accumulate_total_purged_duplicated_bank_count( + pre_purge_bank_count.saturating_sub(post_purge_bank_count), + ); + // It should be rare that optimistically confirmed bank had no prioritized + // transactions, but duplicated and unconfirmed bank had. + if pre_purge_bank_count > 0 && post_purge_bank_count == 0 { + warn!("Finalized bank has empty prioritization fee cache. 
slot {slot} bank id {bank_id}"); + } - if let Some(prioritization_fee) = &mut prioritization_fee { - if let Err(err) = prioritization_fee.mark_block_completed() { - error!( - "Unsuccessful finalizing slot {slot}, bank ID {bank_id}: {:?}", - err - ); - } - prioritization_fee.report_metrics(slot); + if let Some(prioritization_fee) = &mut prioritization_fee { + if let Err(err) = prioritization_fee.mark_block_completed() { + error!( + "Unsuccessful finalizing slot {slot}, bank ID {bank_id}: {:?}", + err + ); } - prioritization_fee - }, - "slot_finalize_time" - ); - metrics.accumulate_total_block_finalize_elapsed_us(slot_finalize_time.as_us()); + prioritization_fee.report_metrics(slot); + } + prioritization_fee + }); + metrics.accumulate_total_block_finalize_elapsed_us(slot_finalize_us); // Create new cache entry if let Some(slot_prioritization_fee) = slot_prioritization_fee { - let (_, cache_lock_time) = measure_time!( - { - let mut cache = cache.write().unwrap(); - while cache.len() >= cache_max_size { - cache.pop_first(); - } - cache.insert(slot, slot_prioritization_fee); - }, - "cache_lock_time" - ); - metrics.accumulate_total_cache_lock_elapsed_us(cache_lock_time.as_us()); + let (_, cache_lock_us) = measure_us!({ + let mut cache = cache.write().unwrap(); + while cache.len() >= cache_max_size { + cache.pop_first(); + } + cache.insert(slot, slot_prioritization_fee); + }); + metrics.accumulate_total_cache_lock_elapsed_us(cache_lock_us); } } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 14b5b08a94a3c9..461410b6a5bf53 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -32,7 +32,7 @@ use { shared_buffer_reader::{SharedBuffer, SharedBufferReader}, utils::{move_and_async_delete_path, ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR}, }, - solana_measure::{measure::Measure, measure_time}, + solana_measure::{measure::Measure, measure_time, measure_us}, solana_sdk::{ clock::{Epoch, Slot}, hash::Hash, @@ -857,7 +857,7 @@ 
fn serialize_snapshot( bank_snapshot_path.display(), ); - let (_, measure_flush) = measure_time!({ + let (_, flush_storages_us) = measure_us!({ for storage in snapshot_storages { storage.flush().map_err(|err| { AddBankSnapshotError::FlushStorage(err, storage.path().to_path_buf()) @@ -869,7 +869,7 @@ fn serialize_snapshot( // constructing a bank from this directory. It acts like an archive to include the full state. // The set of the account storages files is the necessary part of this snapshot state. Hard-link them // from the operational accounts/ directory to here. - let (_, measure_hard_linking) = measure_time!(hard_link_storages_to_snapshot( + let (_, hard_link_storages_us) = measure_us!(hard_link_storages_to_snapshot( &bank_snapshot_dir, slot, snapshot_storages @@ -901,13 +901,13 @@ fn serialize_snapshot( ); let status_cache_path = bank_snapshot_dir.join(SNAPSHOT_STATUS_CACHE_FILENAME); - let (status_cache_consumed_size, status_cache_serialize) = measure_time!( + let (status_cache_consumed_size, status_cache_serialize_us) = measure_us!( snapshot_bank_utils::serialize_status_cache(slot_deltas, &status_cache_path) .map_err(|err| AddBankSnapshotError::SerializeStatusCache(Box::new(err)))? ); let version_path = bank_snapshot_dir.join(SNAPSHOT_VERSION_FILENAME); - let (_, measure_write_version_file) = measure_time!(fs::write( + let (_, write_version_file_us) = measure_us!(fs::write( &version_path, snapshot_version.as_str().as_bytes(), ) @@ -915,8 +915,8 @@ fn serialize_snapshot( // Mark this directory complete so it can be used. Check this flag first before selecting for deserialization. 
let state_complete_path = bank_snapshot_dir.join(SNAPSHOT_STATE_COMPLETE_FILENAME); - let (_, measure_write_state_complete_file) = - measure_time!(fs::File::create(&state_complete_path).map_err(|err| { + let (_, write_state_complete_file_us) = measure_us!(fs::File::create(&state_complete_path) + .map_err(|err| { AddBankSnapshotError::CreateStateCompleteFile(err, state_complete_path) })?); @@ -928,22 +928,14 @@ fn serialize_snapshot( ("slot", slot, i64), ("bank_size", bank_snapshot_consumed_size, i64), ("status_cache_size", status_cache_consumed_size, i64), - ("flush_storages_us", measure_flush.as_us(), i64), - ("hard_link_storages_us", measure_hard_linking.as_us(), i64), + ("flush_storages_us", flush_storages_us, i64), + ("hard_link_storages_us", hard_link_storages_us, i64), ("bank_serialize_us", bank_serialize.as_us(), i64), - ( - "status_cache_serialize_us", - status_cache_serialize.as_us(), - i64 - ), - ( - "write_version_file_us", - measure_write_version_file.as_us(), - i64 - ), + ("status_cache_serialize_us", status_cache_serialize_us, i64), + ("write_version_file_us", write_version_file_us, i64), ( "write_state_complete_file_us", - measure_write_state_complete_file.as_us(), + write_state_complete_file_us, i64 ), ("total_us", measure_everything.as_us(), i64), diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index dedbfaed48f149..c3600772b80091 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -25,7 +25,7 @@ use { }, solana_loader_v4_program::create_program_runtime_environment_v2, solana_log_collector::LogCollector, - solana_measure::{measure::Measure, measure_time}, + solana_measure::{measure::Measure, measure_us}, solana_program_runtime::{ invoke_context::{EnvironmentConfig, InvokeContext}, loaded_programs::{ @@ -240,7 +240,7 @@ impl TransactionBatchProcessor { let mut error_metrics = TransactionErrorMetrics::default(); let mut execute_timings = ExecuteTimings::default(); - let 
(validation_results, validate_fees_time) = measure_time!(self.validate_fees( + let (validation_results, validate_fees_us) = measure_us!(self.validate_fees( callbacks, sanitized_txs, check_results, @@ -254,40 +254,41 @@ impl TransactionBatchProcessor { &mut error_metrics )); - let mut program_cache_time = Measure::start("program_cache"); - let mut program_accounts_map = Self::filter_executable_program_accounts( - callbacks, - sanitized_txs, - &validation_results, - PROGRAM_OWNERS, - ); - for builtin_program in self.builtin_program_ids.read().unwrap().iter() { - program_accounts_map.insert(*builtin_program, 0); - } + let (program_cache_for_tx_batch, program_cache_us) = measure_us!({ + let mut program_accounts_map = Self::filter_executable_program_accounts( + callbacks, + sanitized_txs, + &validation_results, + PROGRAM_OWNERS, + ); + for builtin_program in self.builtin_program_ids.read().unwrap().iter() { + program_accounts_map.insert(*builtin_program, 0); + } - let program_cache_for_tx_batch = Rc::new(RefCell::new(self.replenish_program_cache( - callbacks, - &program_accounts_map, - config.check_program_modification_slot, - config.limit_to_load_programs, - ))); - - if program_cache_for_tx_batch.borrow().hit_max_limit { - const ERROR: TransactionError = TransactionError::ProgramCacheHitMaxLimit; - let loaded_transactions = vec![Err(ERROR); sanitized_txs.len()]; - let execution_results = - vec![TransactionExecutionResult::NotExecuted(ERROR); sanitized_txs.len()]; - return LoadAndExecuteSanitizedTransactionsOutput { - error_metrics, - execute_timings, - execution_results, - loaded_transactions, - }; - } - program_cache_time.stop(); + let program_cache_for_tx_batch = Rc::new(RefCell::new(self.replenish_program_cache( + callbacks, + &program_accounts_map, + config.check_program_modification_slot, + config.limit_to_load_programs, + ))); + + if program_cache_for_tx_batch.borrow().hit_max_limit { + const ERROR: TransactionError = TransactionError::ProgramCacheHitMaxLimit; + 
let loaded_transactions = vec![Err(ERROR); sanitized_txs.len()]; + let execution_results = + vec![TransactionExecutionResult::NotExecuted(ERROR); sanitized_txs.len()]; + return LoadAndExecuteSanitizedTransactionsOutput { + error_metrics, + execute_timings, + execution_results, + loaded_transactions, + }; + } + + program_cache_for_tx_batch + }); - let mut load_time = Measure::start("accounts_load"); - let mut loaded_transactions = load_accounts( + let (mut loaded_transactions, load_accounts_us) = measure_us!(load_accounts( callbacks, sanitized_txs, validation_results, @@ -298,47 +299,43 @@ impl TransactionBatchProcessor { .rent_collector .unwrap_or(&RentCollector::default()), &program_cache_for_tx_batch.borrow(), - ); - load_time.stop(); - - let mut execution_time = Measure::start("execution_time"); - - let execution_results: Vec = loaded_transactions - .iter_mut() - .zip(sanitized_txs.iter()) - .map(|(load_result, tx)| match load_result { - Err(e) => TransactionExecutionResult::NotExecuted(e.clone()), - Ok(loaded_transaction) => { - let result = self.execute_loaded_transaction( - tx, - loaded_transaction, - &mut execute_timings, - &mut error_metrics, - &mut program_cache_for_tx_batch.borrow_mut(), - environment, - config, - ); + )); - if let TransactionExecutionResult::Executed { - details, - programs_modified_by_tx, - } = &result - { - // Update batch specific cache of the loaded programs with the modifications - // made by the transaction, if it executed successfully. 
- if details.status.is_ok() { - program_cache_for_tx_batch - .borrow_mut() - .merge(programs_modified_by_tx); + let (execution_results, execution_us): (Vec, u64) = + measure_us!(loaded_transactions + .iter_mut() + .zip(sanitized_txs.iter()) + .map(|(load_result, tx)| match load_result { + Err(e) => TransactionExecutionResult::NotExecuted(e.clone()), + Ok(loaded_transaction) => { + let result = self.execute_loaded_transaction( + tx, + loaded_transaction, + &mut execute_timings, + &mut error_metrics, + &mut program_cache_for_tx_batch.borrow_mut(), + environment, + config, + ); + + if let TransactionExecutionResult::Executed { + details, + programs_modified_by_tx, + } = &result + { + // Update batch specific cache of the loaded programs with the modifications + // made by the transaction, if it executed successfully. + if details.status.is_ok() { + program_cache_for_tx_batch + .borrow_mut() + .merge(programs_modified_by_tx); + } } - } - result - } - }) - .collect(); - - execution_time.stop(); + result + } + }) + .collect()); // Skip eviction when there's no chance this particular tx batch has increased the size of // ProgramCache entries. 
Note that loaded_missing is deliberately defined, so that there's @@ -359,22 +356,17 @@ impl TransactionBatchProcessor { debug!( "load: {}us execute: {}us txs_len={}", - load_time.as_us(), - execution_time.as_us(), + load_accounts_us, + execution_us, sanitized_txs.len(), ); - execute_timings.saturating_add_in_place( - ExecuteTimingType::ValidateFeesUs, - validate_fees_time.as_us(), - ); - execute_timings.saturating_add_in_place( - ExecuteTimingType::ProgramCacheUs, - program_cache_time.as_us(), - ); - execute_timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_time.as_us()); execute_timings - .saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_time.as_us()); + .saturating_add_in_place(ExecuteTimingType::ValidateFeesUs, validate_fees_us); + execute_timings + .saturating_add_in_place(ExecuteTimingType::ProgramCacheUs, program_cache_us); + execute_timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_accounts_us); + execute_timings.saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_us); LoadAndExecuteSanitizedTransactionsOutput { error_metrics,