Skip to content

Commit

Permalink
Merge branch 'master' into stafik/resharding/state-cleanup-impl
Browse files Browse the repository at this point in the history
  • Loading branch information
staffik authored Jan 14, 2025
2 parents 66befc4 + 483a0e8 commit b58dfb7
Show file tree
Hide file tree
Showing 120 changed files with 1,276 additions and 822 deletions.
2 changes: 1 addition & 1 deletion .cargo/audit.toml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ ignore = [
# parking_lot depends on it.
"RUSTSEC-2024-0384",

# The derivative package is unmainained, but hard to replace right now
# The derivative package is unmaintained, but hard to replace right now
# because ark-poly depends on it.
"RUSTSEC-2024-0388"
]
2 changes: 1 addition & 1 deletion .config/nextest.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ slow-timeout = { period = "30m", terminate-after = 2, grace-period = "1s" }
[profile.ci]
slow-timeout = { period = "5s", terminate-after = 5, grace-period = "1s" }
default-filter = "not test(/^(.*::ultra_slow_test|ultra_slow_test)/)"
# Try a few times before failing the whole test suite on a potentially spurious tests.
# Try a few times before failing the whole test suite on potentially spurious tests.
# The hope is that people will fix the spurious tests as they encounter them locally...
retries = { backoff = "fixed", count = 3, delay = "1s" }
failure-output = "final"
Expand Down
34 changes: 34 additions & 0 deletions .github/workflows/neard_test_features_binary.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
name: Neard test features binary release

on:
workflow_dispatch:
inputs:
branch:
default: 'master'
description: "Nearcore branch to build and publish"
type: string
required: true

jobs:
binary-release:
name: "Build and publish neard binary"
runs-on: "ubuntu-20.04-16core"
environment: deploy
permissions:
id-token: write # required to use OIDC authentication

steps:
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::590184106962:role/GitHubActionsRunner
aws-region: us-west-1

- name: Checkout ${{ github.event.inputs.branch }} branch
if: ${{ github.event_name == 'workflow_dispatch'}}
uses: actions/checkout@v4
with:
ref: ${{ github.event.inputs.branch }}

- name: Neard binary build and upload to S3
run: ./scripts/binary_release.sh test-features-release
33 changes: 8 additions & 25 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,7 @@ ripemd = "0.1.1"
rkyv = "0.8.0"
rlimit = "0.7"
rlp = "0.5.2"
rocksdb = { version = "0.21.0", default-features = false, features = [
rocksdb = { version = "0.22.0", default-features = false, features = [
"snappy",
"lz4",
"zstd",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ create table ft_transfers (
-- Size of the synthetic state at the beginning of the run in bytes.
size_state_bytes integer not null,

-- Bencmark results.
-- Benchmark results.

-- FT transfer transactions per second aggregated over the run of the
-- benchmark.
Expand Down
2 changes: 1 addition & 1 deletion benchmarks/synth-bm/src/account.rs
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,7 @@ pub async fn create_sub_accounts(args: &CreateSubAccountsArgs) -> anyhow::Result
// Await permit before sending the request to make channel buffer size a limit for the
// number of outstanding requests.
let permit = channel_tx.clone().reserve_owned().await.unwrap();
// The spawned task starts running immediately. Assume with interval between spanning them
// The spawned task starts running immediately. Assume an interval between spawning them
// this leads to transaction nonces hitting the node in order.
tokio::spawn(async move {
let res = client.call(request).await;
Expand Down
88 changes: 42 additions & 46 deletions chain/chain/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,6 @@ pub struct Chain {
pub(crate) orphans: OrphanBlockPool,
pub blocks_with_missing_chunks: MissingChunksPool<Orphan>,
genesis: Block,
pub transaction_validity_period: NumBlocks,
pub epoch_length: BlockHeightDelta,
/// Block economics, relevant to changes when new block must be produced.
pub block_economics_config: BlockEconomicsConfig,
Expand Down Expand Up @@ -365,7 +364,13 @@ impl Chain {
save_trie_changes: bool,
) -> Result<Chain, Error> {
let store = runtime_adapter.store();
let chain_store = ChainStore::new(store.clone(), chain_genesis.height, save_trie_changes);
let transaction_validity_period = chain_genesis.transaction_validity_period;
let chain_store = ChainStore::new(
store.clone(),
chain_genesis.height,
save_trie_changes,
transaction_validity_period,
);
let state_roots = get_genesis_state_roots(runtime_adapter.store())?
.expect("genesis should be initialized.");
let (genesis, _genesis_chunks) = Self::make_genesis_block(
Expand All @@ -392,7 +397,6 @@ impl Chain {
blocks_with_missing_chunks: MissingChunksPool::new(),
blocks_in_processing: BlocksInProcessing::new(),
genesis,
transaction_validity_period: chain_genesis.transaction_validity_period,
epoch_length: chain_genesis.epoch_length,
block_economics_config: BlockEconomicsConfig::from(chain_genesis),
doomslug_threshold_mode,
Expand Down Expand Up @@ -430,12 +434,14 @@ impl Chain {
chain_genesis,
state_roots.clone(),
)?;
let transaction_validity_period = chain_genesis.transaction_validity_period;

// Check if we have a head in the store, otherwise pick genesis block.
let mut chain_store = ChainStore::new(
runtime_adapter.store().clone(),
chain_genesis.height,
chain_config.save_trie_changes,
transaction_validity_period,
);
let mut store_update = chain_store.store_update();
let (block_head, header_head) = match store_update.head() {
Expand Down Expand Up @@ -540,7 +546,7 @@ impl Chain {
let head_protocol_version = epoch_manager.get_epoch_protocol_version(&tip.epoch_id)?;
let shard_uids_pending_resharding = epoch_manager
.get_shard_uids_pending_resharding(head_protocol_version, PROTOCOL_VERSION)?;
runtime_adapter.get_tries().load_mem_tries_for_enabled_shards(
runtime_adapter.get_tries().load_memtries_for_enabled_shards(
&tracked_shards,
&shard_uids_pending_resharding,
true,
Expand Down Expand Up @@ -580,7 +586,6 @@ impl Chain {
blocks_in_processing: BlocksInProcessing::new(),
invalid_blocks: LruCache::new(NonZeroUsize::new(INVALID_CHUNKS_POOL_SIZE).unwrap()),
genesis: genesis.clone(),
transaction_validity_period: chain_genesis.transaction_validity_period,
epoch_length: chain_genesis.epoch_length,
block_economics_config: BlockEconomicsConfig::from(chain_genesis),
doomslug_threshold_mode,
Expand Down Expand Up @@ -1545,7 +1550,6 @@ impl Chain {
self.epoch_manager.clone(),
self.runtime_adapter.clone(),
self.doomslug_threshold_mode,
self.transaction_validity_period,
)
}

Expand Down Expand Up @@ -2043,7 +2047,7 @@ impl Chain {

if self.epoch_manager.is_next_block_epoch_start(block.header().prev_hash())? {
// Keep in memory only these tries that we care about this or next epoch.
self.runtime_adapter.get_tries().retain_mem_tries(&shards_cares_this_or_next_epoch);
self.runtime_adapter.get_tries().retain_memtries(&shards_cares_this_or_next_epoch);
}

if let Err(err) = self.garbage_collect_state_transition_data(&block) {
Expand Down Expand Up @@ -3191,48 +3195,34 @@ impl Chain {
block: &Block,
prev_block_header: &BlockHeader,
chunk: &ShardChunk,
) -> Result<(), Error> {
) -> Result<Vec<bool>, Error> {
let protocol_version =
self.epoch_manager.get_epoch_protocol_version(block.header().epoch_id())?;

if checked_feature!(
let relaxed_chunk_validation = checked_feature!(
"protocol_feature_relaxed_chunk_validation",
RelaxedChunkValidation,
protocol_version
) {
return Ok(());
}
);

if !validate_transactions_order(chunk.transactions()) {
let merkle_paths =
Block::compute_chunk_headers_root(block.chunks().iter_deprecated()).1;
let epoch_id = block.header().epoch_id();
let shard_layout = self.epoch_manager.get_shard_layout(&epoch_id)?;
let shard_id = chunk.shard_id();
let shard_index = shard_layout.get_shard_index(shard_id)?;

let chunk_proof = ChunkProofs {
block_header: borsh::to_vec(&block.header()).expect("Failed to serialize"),
merkle_proof: merkle_paths[shard_index].clone(),
chunk: MaybeEncodedShardChunk::Decoded(chunk.clone()).into(),
};
return Err(Error::InvalidChunkProofs(Box::new(chunk_proof)));
}
if !relaxed_chunk_validation {
if !validate_transactions_order(chunk.transactions()) {
let merkle_paths =
Block::compute_chunk_headers_root(block.chunks().iter_deprecated()).1;
let epoch_id = block.header().epoch_id();
let shard_layout = self.epoch_manager.get_shard_layout(&epoch_id)?;
let shard_id = chunk.shard_id();
let shard_index = shard_layout.get_shard_index(shard_id)?;

if checked_feature!("stable", AccessKeyNonceRange, protocol_version) {
let transaction_validity_period = self.transaction_validity_period;
for transaction in chunk.transactions() {
self.chain_store()
.check_transaction_validity_period(
prev_block_header,
transaction.transaction.block_hash(),
transaction_validity_period,
)
.map_err(|_| Error::from(Error::InvalidTransactions))?;
let chunk_proof = ChunkProofs {
block_header: borsh::to_vec(&block.header()).expect("Failed to serialize"),
merkle_proof: merkle_paths[shard_index].clone(),
chunk: MaybeEncodedShardChunk::Decoded(chunk.clone()).into(),
};
return Err(Error::InvalidChunkProofs(Box::new(chunk_proof)));
}
};
}

Ok(())
self.chain_store().compute_transaction_validity(protocol_version, prev_block_header, chunk)
}

pub fn transaction_validity_check<'a>(
Expand All @@ -3241,11 +3231,7 @@ impl Chain {
) -> impl FnMut(&SignedTransaction) -> bool + 'a {
move |tx: &SignedTransaction| -> bool {
self.chain_store()
.check_transaction_validity_period(
&prev_block_header,
tx.transaction.block_hash(),
self.transaction_validity_period,
)
.check_transaction_validity_period(&prev_block_header, tx.transaction.block_hash())
.is_ok()
}
}
Expand Down Expand Up @@ -3794,7 +3780,8 @@ impl Chain {
}
})?;

self.validate_chunk_transactions(&block, prev_block.header(), &chunk)?;
let tx_valid_list =
self.validate_chunk_transactions(&block, prev_block.header(), &chunk)?;

// we can't use hash from the current block here yet because the incoming receipts
// for this block is not stored yet
Expand Down Expand Up @@ -3827,6 +3814,7 @@ impl Chain {
ShardUpdateReason::NewChunk(NewChunkData {
chunk_header: chunk_header.clone(),
transactions: chunk.transactions().to_vec(),
transaction_validity_check_results: tx_valid_list,
receipts,
block: block_context,
is_first_block_with_chunk_of_version,
Expand Down Expand Up @@ -3992,6 +3980,14 @@ impl Chain {
}
}
}

pub fn transaction_validity_period(&self) -> BlockHeightDelta {
self.chain_store.transaction_validity_period
}

pub fn set_transaction_validity_period(&mut self, to: BlockHeightDelta) {
self.chain_store.transaction_validity_period = to;
}
}

/// This method calculates the congestion info for the genesis chunks. It uses
Expand Down
Loading

0 comments on commit b58dfb7

Please sign in to comment.