diff --git a/.github/workflows/issue-metrics.yml b/.github/workflows/issue-metrics.yml new file mode 100644 index 00000000000..1065168cc56 --- /dev/null +++ b/.github/workflows/issue-metrics.yml @@ -0,0 +1,41 @@ +name: Monthly issue metrics + +on: + workflow_dispatch: + schedule: + - cron: '3 2 1 * *' + +permissions: + issues: write + +jobs: + monthly-issue-metrics: + name: past month issue metrics + runs-on: ubuntu-latest + steps: + - name: Get dates for last month + shell: bash + id: last-month + run: | + # Calculate the first day of the previous month + first_day=$(date -d "last month" +%Y-%m-01) + + # Calculate the last day of the previous month + last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d) + + #Set an environment variable with the date range + echo "$first_day..$last_day" + echo "LAST_MONTH=$first_day..$last_day" >> $GITHUB_OUTPUT + + - name: Run issue-metrics tool + uses: github/issue-metrics@v2 + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SEARCH_QUERY: 'repo:near/nearcore is:issue created:${{ steps.last-month.outputs.LAST_MONTH }}' + + - name: Create issue + uses: peter-evans/create-issue-from-file@v4 + with: + title: Monthly issue metrics report + token: ${{ secrets.GITHUB_TOKEN }} + content-filepath: ./issue_metrics.md diff --git a/.github/workflows/nightly_nayduck.yml b/.github/workflows/nightly_nayduck.yml index 6ddba8a473d..f692428f16b 100644 --- a/.github/workflows/nightly_nayduck.yml +++ b/.github/workflows/nightly_nayduck.yml @@ -1,6 +1,5 @@ name: Nightly Nayduck tests check on: - pull_request: merge_group: jobs: diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index e0b364a2142..a4216689100 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -1003,11 +1003,12 @@ impl Chain { *block_hash, GCMode::Canonical(tries.clone()), )?; - chain_store_update.clear_resharding_data( - self.runtime_adapter.as_ref(), - self.epoch_manager.as_ref(), - *block_hash, - )?; + // TODO(resharding): Call clear_resharding_data once we figure out what's wrong + // chain_store_update.clear_resharding_data( + // self.runtime_adapter.as_ref(), + // self.epoch_manager.as_ref(), + // *block_hash, + // )?; gc_blocks_remaining -= 1; } else { return Err(Error::GCError( diff --git a/chain/network/src/config_json.rs b/chain/network/src/config_json.rs index a1c10453da0..1d69a722a1f 100644 --- a/chain/network/src/config_json.rs +++ b/chain/network/src/config_json.rs @@ -60,11 +60,6 @@ fn default_peer_expiration_duration() -> Duration { Duration::from_secs(7 * 24 * 60 * 60) } -/// If non-zero - we'll skip sending tombstones during initial sync and for that many seconds after start. -fn default_skip_tombstones() -> i64 { - 0 -} - /// This is a list of public STUN servers provided by Google, /// which are known to have good availability. To avoid trusting /// a centralized entity (and DNS used for domain resolution), @@ -201,28 +196,11 @@ pub struct Config { pub experimental: ExperimentalConfig, } -fn default_tier1_enable_inbound() -> bool { - true -} -fn default_tier1_enable_outbound() -> bool { - true -} - -fn default_tier1_connect_interval() -> Duration { - Duration::from_secs(60) -} - -fn default_tier1_new_connections_per_attempt() -> u64 { - 50 -} - #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct ExperimentalConfig { // If true - don't allow any inbound connections. - #[serde(default)] pub inbound_disabled: bool, // If true - connect only to the boot nodes. - #[serde(default)] pub connect_only_to_boot_nodes: bool, // If greater than 0, then system will no longer send or receive tombstones @@ -230,28 +208,22 @@ pub struct ExperimentalConfig { // // The better name is `skip_tombstones_seconds`, but we keep send for // compatibility. - #[serde(default = "default_skip_tombstones")] pub skip_sending_tombstones_seconds: i64, /// See `near_network::config::Tier1::enable_inbound`. - #[serde(default = "default_tier1_enable_inbound")] pub tier1_enable_inbound: bool, /// See `near_network::config::Tier1::enable_outbound`. - #[serde(default = "default_tier1_enable_outbound")] pub tier1_enable_outbound: bool, /// See `near_network::config::Tier1::connect_interval`. - #[serde(default = "default_tier1_connect_interval")] pub tier1_connect_interval: Duration, /// See `near_network::config::Tier1::new_connections_per_attempt`. - #[serde(default = "default_tier1_new_connections_per_attempt")] pub tier1_new_connections_per_attempt: u64, /// See `NetworkConfig`. /// Fields set here will override the NetworkConfig fields. - #[serde(default)] pub network_config_overrides: NetworkConfigOverrides, } @@ -277,11 +249,11 @@ impl Default for ExperimentalConfig { ExperimentalConfig { inbound_disabled: false, connect_only_to_boot_nodes: false, - skip_sending_tombstones_seconds: default_skip_tombstones(), - tier1_enable_inbound: default_tier1_enable_inbound(), - tier1_enable_outbound: default_tier1_enable_outbound(), - tier1_connect_interval: default_tier1_connect_interval(), - tier1_new_connections_per_attempt: default_tier1_new_connections_per_attempt(), + skip_sending_tombstones_seconds: 0, + tier1_enable_inbound: true, + tier1_enable_outbound: true, + tier1_connect_interval: Duration::from_secs(60), + tier1_new_connections_per_attempt: 50, network_config_overrides: Default::default(), } } diff --git a/core/chain-configs/src/client_config.rs b/core/chain-configs/src/client_config.rs index a85c0f62e39..b61e64a2b57 100644 --- a/core/chain-configs/src/client_config.rs +++ b/core/chain-configs/src/client_config.rs @@ -30,19 +30,17 @@ pub const DEFAULT_STATE_SYNC_NUM_CONCURRENT_REQUESTS_ON_CATCHUP_EXTERNAL: u32 = /// Configuration for garbage collection. #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, PartialEq)] +#[serde(default)] pub struct GCConfig { /// Maximum number of blocks to garbage collect at every garbage collection /// call. - #[serde(default = "default_gc_blocks_limit")] pub gc_blocks_limit: NumBlocks, /// Maximum number of height to go through at each garbage collection step /// when cleaning forks during garbage collection. - #[serde(default = "default_gc_fork_clean_step")] pub gc_fork_clean_step: u64, /// Number of epochs for which we keep store data. - #[serde(default = "default_gc_num_epochs_to_keep")] pub gc_num_epochs_to_keep: u64, } @@ -56,18 +54,6 @@ impl Default for GCConfig { } } -fn default_gc_blocks_limit() -> NumBlocks { - GCConfig::default().gc_blocks_limit -} - -fn default_gc_fork_clean_step() -> u64 { - GCConfig::default().gc_fork_clean_step -} - -fn default_gc_num_epochs_to_keep() -> u64 { - GCConfig::default().gc_num_epochs_to_keep() -} - impl GCConfig { pub fn gc_num_epochs_to_keep(&self) -> u64 { max(MIN_GC_NUM_EPOCHS_TO_KEEP, self.gc_num_epochs_to_keep) @@ -163,6 +149,7 @@ impl SyncConfig { } #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)] +#[serde(default)] pub struct StateSplitConfig { /// The soft limit on the size of a single batch. The batch size can be /// decreased if resharding is consuming too many resources and interfering diff --git a/core/primitives/src/shard_layout.rs b/core/primitives/src/shard_layout.rs index 7f2252b63c7..cce0b8bf031 100644 --- a/core/primitives/src/shard_layout.rs +++ b/core/primitives/src/shard_layout.rs @@ -160,10 +160,7 @@ impl ShardLayout { /// This is work in progress and the exact way of splitting is yet to be determined. pub fn get_simple_nightshade_layout_v2() -> ShardLayout { ShardLayout::v1( - // TODO(resharding) - find the right boundary to split shards in - // place of just "sweat". Likely somewhere in between near.social - // and sweatcoin. - vec!["aurora", "aurora-0", "kkuuue2akv_1630967379.near", "sweat"] + vec!["aurora", "aurora-0", "kkuuue2akv_1630967379.near", "tge-lockup.sweat"] .into_iter() .map(|s| s.parse().unwrap()) .collect(), @@ -611,7 +608,7 @@ mod tests { "aurora", "aurora-0", "kkuuue2akv_1630967379.near", - "sweat" + "tge-lockup.sweat" ], "shards_split_map": [ [ diff --git a/core/store/src/db/colddb.rs b/core/store/src/db/colddb.rs index 90aa9e7ac5d..57c2a2903d9 100644 --- a/core/store/src/db/colddb.rs +++ b/core/store/src/db/colddb.rs @@ -29,7 +29,7 @@ impl ColdDB { // Checks if the column is is the cold db and returns an error if not. fn check_is_in_colddb(col: DBCol) -> std::io::Result<()> { if !col.is_in_colddb() { - return Err(std::io::Error::new(std::io::ErrorKind::Other, Self::err_msg(col))); + return Err(std::io::Error::other(Self::err_msg(col))); } Ok(()) } diff --git a/core/store/src/db/rocksdb.rs b/core/store/src/db/rocksdb.rs index 35e73a6c156..0a6e0a12771 100644 --- a/core/store/src/db/rocksdb.rs +++ b/core/store/src/db/rocksdb.rs @@ -114,7 +114,7 @@ impl RocksDB { columns: &[DBCol], ) -> io::Result { let counter = instance_tracker::InstanceTracker::try_new(store_config.max_open_files) - .map_err(other_error)?; + .map_err(io::Error::other)?; let (db, db_opt) = Self::open_db(path, store_config, mode, temp, columns)?; let cf_handles = Self::get_cf_handles(&db, columns); Ok(Self { db, db_opt, cf_handles, _instance_tracker: counter }) @@ -144,7 +144,7 @@ impl RocksDB { } else { DB::open_cf_descriptors(&options, path, cf_descriptors) } - .map_err(into_other)?; + .map_err(io::Error::other)?; if cfg!(feature = "single_thread_rocksdb") { // These have to be set after open db let mut env = Env::new().unwrap(); @@ -200,7 +200,7 @@ impl RocksDB { } else if cfg!(debug_assertions) { panic!("The database instance isn’t setup to access {col}"); } else { - Err(other_error(format!("{col}: no such column"))) + Err(io::Error::other(format!("{col}: no such column"))) } } @@ -269,7 +269,7 @@ impl<'a> Iterator for RocksDBIterator<'a> { type Item = io::Result<(Box<[u8]>, Box<[u8]>)>; fn next(&mut self) -> Option { - Some(self.0.next()?.map_err(into_other)) + Some(self.0.next()?.map_err(io::Error::other)) } } @@ -314,7 +314,7 @@ impl Database for RocksDB { let result = self .db .get_pinned_cf_opt(self.cf_handle(col)?, key, &read_options) - .map_err(into_other)? + .map_err(io::Error::other)? .map(DBSlice::from_rocksdb_slice); timer.observe_duration(); Ok(result) @@ -366,7 +366,7 @@ impl Database for RocksDB { } DBOp::DeleteAll { col } => { let cf_handle = self.cf_handle(col)?; - let range = self.get_cf_key_range(cf_handle).map_err(into_other)?; + let range = self.get_cf_key_range(cf_handle).map_err(io::Error::other)?; if let Some(range) = range { batch.delete_range_cf(cf_handle, range.start(), range.end()); // delete_range_cf deletes ["begin_key", "end_key"), so need one more delete @@ -378,7 +378,7 @@ impl Database for RocksDB { } } } - self.db.write(batch).map_err(into_other) + self.db.write(batch).map_err(io::Error::other) } fn compact(&self) -> io::Result<()> { @@ -392,7 +392,7 @@ impl Database for RocksDB { // Need to iterator over all CFs because the normal `flush()` only // flushes the default column family. for col in DBCol::iter() { - self.db.flush_cf(self.cf_handle(col)?).map_err(into_other)?; + self.db.flush_cf(self.cf_handle(col)?).map_err(io::Error::other)?; } Ok(()) } @@ -640,14 +640,6 @@ fn parse_statistics( Ok(()) } -fn other_error(msg: String) -> io::Error { - io::Error::new(io::ErrorKind::Other, msg) -} - -fn into_other(error: rocksdb::Error) -> io::Error { - io::Error::new(io::ErrorKind::Other, error.into_string()) -} - /// Returns name of a RocksDB column family corresponding to given column. /// /// Historically we used `col##` names (with `##` being index of the column). diff --git a/core/store/src/db/rocksdb/instance_tracker.rs b/core/store/src/db/rocksdb/instance_tracker.rs index 784bbfd65c5..31d18880224 100644 --- a/core/store/src/db/rocksdb/instance_tracker.rs +++ b/core/store/src/db/rocksdb/instance_tracker.rs @@ -205,17 +205,13 @@ impl NoFile for RealNoFile { #[test] fn test_ensure_max_open_files_limit() { - fn other_error(msg: &str) -> std::io::Error { - super::other_error(msg.to_string()) - } - /// Mock implementation of NoFile interface. struct MockNoFile<'a>(&'a mut (u64, u64)); impl<'a> NoFile for MockNoFile<'a> { fn get(&self) -> std::io::Result<(u64, u64)> { if self.0 .0 == 666 { - Err(other_error("error")) + Err(std::io::ErrorKind::Other.into()) } else { Ok(*self.0) } @@ -224,7 +220,7 @@ fn test_ensure_max_open_files_limit() { fn set(&mut self, soft: u64, hard: u64) -> std::io::Result<()> { let (old_soft, old_hard) = self.get().unwrap(); if old_hard == 666000 { - Err(other_error("error")) + Err(std::io::ErrorKind::Other.into()) } else { assert!(soft != old_soft, "Pointless call to set"); *self.0 = (soft, hard); diff --git a/core/store/src/db/rocksdb/snapshot.rs b/core/store/src/db/rocksdb/snapshot.rs index d36c27ad95f..76be0806b55 100644 --- a/core/store/src/db/rocksdb/snapshot.rs +++ b/core/store/src/db/rocksdb/snapshot.rs @@ -51,7 +51,7 @@ impl std::convert::From for SnapshotError { impl std::convert::From<::rocksdb::Error> for SnapshotError { fn from(err: ::rocksdb::Error) -> Self { - super::into_other(err).into() + io::Error::other(err).into() } } @@ -94,7 +94,7 @@ impl Snapshot { } let db = super::RocksDB::open(db_path, config, crate::Mode::ReadWriteExisting, temp)?; - let cp = Checkpoint::new(&db.db).map_err(super::into_other)?; + let cp = Checkpoint::new(&db.db).map_err(io::Error::other)?; cp.create_checkpoint(&snapshot_path)?; Ok(Self(Some(snapshot_path))) diff --git a/core/store/src/flat/store_helper.rs b/core/store/src/flat/store_helper.rs index d09b9300469..3c4510634db 100644 --- a/core/store/src/flat/store_helper.rs +++ b/core/store/src/flat/store_helper.rs @@ -139,17 +139,13 @@ pub fn encode_flat_state_db_key(shard_uid: ShardUId, key: &[u8]) -> Vec { pub fn decode_flat_state_db_key(key: &[u8]) -> io::Result<(ShardUId, Vec)> { if key.len() < 8 { - return Err(io::Error::new( - io::ErrorKind::Other, - format!("expected FlatState key length to be at least 8: {key:?}"), - )); + return Err(io::Error::other(format!( + "expected FlatState key length to be at least 8: {key:?}" + ))); } let (shard_uid_bytes, trie_key) = key.split_at(8); let shard_uid = shard_uid_bytes.try_into().map_err(|err| { - io::Error::new( - io::ErrorKind::Other, - format!("failed to decode shard_uid as part of FlatState key: {err}"), - ) + io::Error::other(format!("failed to decode shard_uid as part of FlatState key: {err}")) })?; Ok((shard_uid, trie_key.to_vec())) } diff --git a/core/store/src/metadata.rs b/core/store/src/metadata.rs index eab2c05041c..30a25a20ef3 100644 --- a/core/store/src/metadata.rs +++ b/core/store/src/metadata.rs @@ -103,7 +103,7 @@ fn read( match result { Some(value) => Ok(value), - None => Err(other_error(format!("missing {what}; {msg}"))), + None => Err(std::io::Error::other(format!("missing {what}; {msg}"))), } } @@ -118,19 +118,12 @@ fn maybe_read( key: &[u8], ) -> std::io::Result> { let msg = "it’s not a neard database or database is corrupted"; - if let Some(bytes) = db.get_raw_bytes(crate::DBCol::DbVersion, key)? { - let value = std::str::from_utf8(&bytes) - .map_err(|_err| format!("invalid {what}: {bytes:?}; {msg}")) - .map_err(other_error)?; - let value = T::from_str(value) - .map_err(|_err| format!("invalid {what}: ‘{value}’; {msg}")) - .map_err(other_error)?; - Ok(Some(value)) - } else { - Ok(None) - } -} - -fn other_error(msg: String) -> std::io::Error { - std::io::Error::new(std::io::ErrorKind::Other, msg) + db.get_raw_bytes(crate::DBCol::DbVersion, key)? + .map(|bytes| { + let value = std::str::from_utf8(&bytes) + .map_err(|_err| format!("invalid {what}: {bytes:?}; {msg}"))?; + T::from_str(value).map_err(|_err| format!("invalid {what}: ‘{value}’; {msg}")) + }) + .transpose() + .map_err(std::io::Error::other) } diff --git a/core/store/src/opener.rs b/core/store/src/opener.rs index 6a5926758e5..a4c59e4637f 100644 --- a/core/store/src/opener.rs +++ b/core/store/src/opener.rs @@ -524,7 +524,7 @@ impl<'a> DBOpener<'a> { let metadata = DbMetadata::read(&db)?; if want_version != metadata.version { let msg = format!("unexpected DbVersion {}; expected {want_version}", metadata.version); - Err(std::io::Error::new(std::io::ErrorKind::Other, msg)) + Err(std::io::Error::other(msg)) } else { Ok((db, metadata)) } diff --git a/integration-tests/src/tests/client/resharding.rs b/integration-tests/src/tests/client/resharding.rs index 97cc92d2404..e8433ca398b 100644 --- a/integration-tests/src/tests/client/resharding.rs +++ b/integration-tests/src/tests/client/resharding.rs @@ -1059,11 +1059,13 @@ fn test_shard_layout_upgrade_gc_impl(resharding_type: ReshardingType, rng_seed: } #[test] +#[ignore] fn test_shard_layout_upgrade_gc() { test_shard_layout_upgrade_gc_impl(ReshardingType::V1, 44); } #[test] +#[ignore] fn test_shard_layout_upgrade_gc_v2() { // TODO(resharding) remove those checks once rolled out if checked_feature!("stable", SimpleNightshadeV2, PROTOCOL_VERSION) { diff --git a/nearcore/src/config.rs b/nearcore/src/config.rs index 0418c57cad6..e9a4023522e 100644 --- a/nearcore/src/config.rs +++ b/nearcore/src/config.rs @@ -302,19 +302,15 @@ pub struct Config { #[serde(skip_serializing_if = "Option::is_none")] pub save_trie_changes: Option, pub log_summary_style: LogSummaryStyle, - #[serde(default = "default_log_summary_period")] pub log_summary_period: Duration, // Allows more detailed logging, for example a list of orphaned blocks. pub enable_multiline_logging: Option, /// Garbage collection configuration. - #[serde(default, flatten)] + #[serde(flatten)] pub gc: GCConfig, - #[serde(default = "default_view_client_threads")] pub view_client_threads: usize, pub epoch_sync_enabled: bool, - #[serde(default = "default_view_client_throttle_period")] pub view_client_throttle_period: Duration, - #[serde(default = "default_trie_viewer_state_size_limit")] pub trie_viewer_state_size_limit: Option, /// If set, overrides value in genesis configuration. #[serde(skip_serializing_if = "Option::is_none")] @@ -323,14 +319,14 @@ pub struct Config { pub store: near_store::StoreConfig, /// Different parameters to configure underlying cold storage. /// This feature is under development, do not use in production. - #[serde(default, skip_serializing_if = "Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub cold_store: Option, /// Configuration for the split storage. - #[serde(default, skip_serializing_if = "Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub split_storage: Option, /// The node will stop after the head exceeds this height. /// The node usually stops within several seconds after reaching the target height. - #[serde(default, skip_serializing_if = "Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub expected_shutdown: Option, /// Whether to use state sync (unreliable and corrupts the DB if fails) or do a block sync instead. #[serde(skip_serializing_if = "Option::is_none")] @@ -344,7 +340,6 @@ pub struct Config { /// guarantees that the node will use bounded resources to store incoming transactions. /// Setting this value too low (<1MB) on the validator might lead to production of smaller /// chunks and underutilizing the capacity of the network. - #[serde(default = "default_transaction_pool_size_limit")] pub transaction_pool_size_limit: Option, pub state_split_config: StateSplitConfig, } diff --git a/pytest/tests/sanity/rpc_state_changes.py b/pytest/tests/sanity/rpc_state_changes.py index bf20b2be5a5..009bc9d08f7 100755 --- a/pytest/tests/sanity/rpc_state_changes.py +++ b/pytest/tests/sanity/rpc_state_changes.py @@ -54,9 +54,10 @@ def test_changes_with_new_account_with_access_key(): 4. Observe the changes in the block where the receipt lands. """ + base_account_id = nodes[0].signer_key.account_id # re-use the key as a new account access key new_key = Key( - account_id='rpc_key_value_changes_full_access', + account_id=f'rpc_key_value_changes.{base_account_id}', pk=nodes[1].signer_key.pk, sk=nodes[1].signer_key.sk, ) diff --git a/runtime/near-test-contracts/build.rs b/runtime/near-test-contracts/build.rs index e551f5ff07b..41e062e8bb6 100644 --- a/runtime/near-test-contracts/build.rs +++ b/runtime/near-test-contracts/build.rs @@ -1,14 +1,11 @@ -use std::path::Path; -use std::path::PathBuf; use std::process::Command; -use std::{env, fs, io, process}; type Error = Box; fn main() { if let Err(err) = try_main() { eprintln!("{}", err); - process::exit(1); + std::process::exit(1); } } @@ -39,14 +36,14 @@ fn build_contract(dir: &str, args: &[&str], output: &str) -> Result<(), Error> { let src = target_dir.join(format!("wasm32-unknown-unknown/release/{}.wasm", dir.replace('-', "_"))); - fs::copy(&src, format!("./res/{}.wasm", output)) + std::fs::copy(&src, format!("./res/{}.wasm", output)) .map_err(|err| format!("failed to copy `{}`: {}", src.display(), err))?; println!("cargo:rerun-if-changed=./{}/src/lib.rs", dir); println!("cargo:rerun-if-changed=./{}/Cargo.toml", dir); Ok(()) } -fn cargo_build_cmd(target_dir: &Path) -> Command { +fn cargo_build_cmd(target_dir: &std::path::Path) -> Command { let mut res = Command::new("cargo"); res.env_remove("CARGO_BUILD_RUSTFLAGS"); @@ -61,15 +58,18 @@ fn cargo_build_cmd(target_dir: &Path) -> Command { } fn check_status(mut cmd: Command) -> Result<(), Error> { - let status = cmd.status().map_err(|err| { - io::Error::new(io::ErrorKind::Other, format!("command `{:?}` failed to run: {}", cmd, err)) - })?; - if !status.success() { - return Err(format!("command `{:?}` exited with non-zero status: {:?}", cmd, status).into()); - } - Ok(()) + cmd.status() + .map_err(|err| format!("command `{cmd:?}` failed to run: {err}")) + .and_then(|status| { + if status.success() { + Ok(()) + } else { + Err(format!("command `{cmd:?}` exited with non-zero status: {status:?}")) + } + }) + .map_err(Error::from) } -fn out_dir() -> PathBuf { - env::var("OUT_DIR").unwrap().into() +fn out_dir() -> std::path::PathBuf { + std::env::var("OUT_DIR").unwrap().into() } diff --git a/test-utils/style/src/lib.rs b/test-utils/style/src/lib.rs index 125f9fa16b0..d0ce3832e94 100644 --- a/test-utils/style/src/lib.rs +++ b/test-utils/style/src/lib.rs @@ -6,23 +6,37 @@ use std::{ }; /// Add common cargo arguments for tests run by this code. -fn cargo_env(cmd: &mut Command) { +fn cargo_env(cmd: &mut Command, target_dir: Option<&str>) { // Set the working directory to the project root, rather than using whatever default nextest // gives us. let style_root = std::env::var_os("CARGO_MANIFEST_DIR").unwrap_or(OsString::from("./")); let wp_root: PathBuf = [&style_root, OsStr::new(".."), OsStr::new("..")].into_iter().collect(); cmd.current_dir(&wp_root); - // Use a different target directory to avoid invalidating any cache after tests are run (so - // that running `cargo nextest` twice does not rebuild half of the workspace on the 2nd - // rebuild. Unfortunately cargo itself does not readily expose this information to us, so we - // have to guess a little as to where this directory might end up. - // - // NB: We aren't using a temporary directory proper here in order to *allow* keeping cache - // between individual `clippy` runs and such. - let target_dir: PathBuf = - [wp_root.as_os_str(), OsStr::new("target"), OsStr::new("style")].into_iter().collect(); - cmd.env("CARGO_TARGET_DIR", target_dir.as_path()); + if let Some(tgt_dir) = target_dir { + // Use a different target directory to avoid invalidating any cache after tests are run (so + // that running `cargo nextest` twice does not rebuild half of the workspace on the 2nd + // rebuild. Unfortunately cargo itself does not readily expose this information to us, so + // we have to guess a little as to where this directory might end up. + // + // NB: We aren't using a temporary directory proper here in order to *allow* keeping cache + // between individual `clippy` runs and such. + let target_dir: PathBuf = + [wp_root.as_os_str(), OsStr::new("target"), OsStr::new(tgt_dir)].into_iter().collect(); + cmd.env("CARGO_TARGET_DIR", target_dir.as_path()); + } +} + +/// Create a cargo command. +/// +/// You will want to set `target_dir` to some unique `Some` value whenever there’s a chance that +/// this invocation of `cargo` will build any project code. Setting unique values avoids lock +/// contention and unintentional cache invalidation. +fn cargo(target_dir: Option<&str>) -> Command { + let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); + let mut cmd = Command::new(cargo); + cargo_env(&mut cmd, target_dir); + cmd } fn ensure_success(mut cmd: std::process::Command) { @@ -38,36 +52,28 @@ fn ensure_success(mut cmd: std::process::Command) { #[test] fn rustfmt() { - let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); - let mut cmd = Command::new(cargo); - cargo_env(&mut cmd); + let mut cmd = cargo(None); cmd.args(&["fmt", "--", "--check"]); ensure_success(cmd); } #[test] fn clippy() { - let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); - let mut cmd = Command::new(cargo); - cargo_env(&mut cmd); + let mut cmd = cargo(Some("style")); cmd.args(&["clippy", "--all-targets", "--all-features", "--locked"]); ensure_success(cmd); } #[test] fn deny() { - let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); - let mut cmd = Command::new(cargo); - cargo_env(&mut cmd); + let mut cmd = cargo(None); cmd.args(&["deny", "--all-features", "--locked", "check", "bans"]); ensure_success(cmd); } #[test] fn themis() { - let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); - let mut cmd = Command::new(cargo); - cargo_env(&mut cmd); + let mut cmd = cargo(Some("themis")); cmd.args(&["run", "--locked", "-p", "themis"]); ensure_success(cmd); }