Skip to content

Commit

Permalink
Merge branch 'master' into fix-urls
Browse files Browse the repository at this point in the history
  • Loading branch information
Ekleog-NEAR authored Nov 20, 2023
2 parents e2e1764 + 4158229 commit e7b23c0
Show file tree
Hide file tree
Showing 18 changed files with 135 additions and 157 deletions.
41 changes: 41 additions & 0 deletions .github/workflows/issue-metrics.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
name: Monthly issue metrics

on:
  # Allow manual runs in addition to the monthly schedule.
  workflow_dispatch:
  schedule:
    # 02:03 UTC on the 1st of every month (odd minute/hour to avoid load spikes).
    - cron: '3 2 1 * *'

permissions:
  # Needed by peter-evans/create-issue-from-file to open the report issue.
  issues: write

jobs:
  monthly-issue-metrics:
    name: past month issue metrics
    runs-on: ubuntu-latest
    steps:
      - name: Get dates for last month
        shell: bash
        id: last-month
        run: |
          # Calculate the first day of the previous month
          first_day=$(date -d "last month" +%Y-%m-01)
          # Calculate the last day of the previous month
          last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d)
          # Set a step output with the date range (consumed via steps.last-month.outputs)
          echo "$first_day..$last_day"
          echo "LAST_MONTH=$first_day..$last_day" >> "$GITHUB_OUTPUT"
      - name: Run issue-metrics tool
        uses: github/issue-metrics@v2
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SEARCH_QUERY: 'repo:near/nearcore is:issue created:${{ steps.last-month.outputs.LAST_MONTH }}'

      - name: Create issue
        uses: peter-evans/create-issue-from-file@v4
        with:
          title: Monthly issue metrics report
          token: ${{ secrets.GITHUB_TOKEN }}
          content-filepath: ./issue_metrics.md
1 change: 0 additions & 1 deletion .github/workflows/nightly_nayduck.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
name: Nightly Nayduck tests check
on:
pull_request:
merge_group:

jobs:
Expand Down
11 changes: 6 additions & 5 deletions chain/chain/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1003,11 +1003,12 @@ impl Chain {
*block_hash,
GCMode::Canonical(tries.clone()),
)?;
chain_store_update.clear_resharding_data(
self.runtime_adapter.as_ref(),
self.epoch_manager.as_ref(),
*block_hash,
)?;
// TODO(resharding): Call clear_resharding_data once we figure out what's wrong
// chain_store_update.clear_resharding_data(
// self.runtime_adapter.as_ref(),
// self.epoch_manager.as_ref(),
// *block_hash,
// )?;
gc_blocks_remaining -= 1;
} else {
return Err(Error::GCError(
Expand Down
38 changes: 5 additions & 33 deletions chain/network/src/config_json.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,11 +60,6 @@ fn default_peer_expiration_duration() -> Duration {
Duration::from_secs(7 * 24 * 60 * 60)
}

/// If non-zero - we'll skip sending tombstones during initial sync and for that many seconds after start.
fn default_skip_tombstones() -> i64 {
0
}

/// This is a list of public STUN servers provided by Google,
/// which are known to have good availability. To avoid trusting
/// a centralized entity (and DNS used for domain resolution),
Expand Down Expand Up @@ -201,57 +196,34 @@ pub struct Config {
pub experimental: ExperimentalConfig,
}

fn default_tier1_enable_inbound() -> bool {
true
}
fn default_tier1_enable_outbound() -> bool {
true
}

fn default_tier1_connect_interval() -> Duration {
Duration::from_secs(60)
}

fn default_tier1_new_connections_per_attempt() -> u64 {
50
}

#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)]
pub struct ExperimentalConfig {
// If true - don't allow any inbound connections.
#[serde(default)]
pub inbound_disabled: bool,
// If true - connect only to the boot nodes.
#[serde(default)]
pub connect_only_to_boot_nodes: bool,

// If greater than 0, then system will no longer send or receive tombstones
// during sync and during that many seconds after startup.
//
// The better name is `skip_tombstones_seconds`, but we keep send for
// compatibility.
#[serde(default = "default_skip_tombstones")]
pub skip_sending_tombstones_seconds: i64,

/// See `near_network::config::Tier1::enable_inbound`.
#[serde(default = "default_tier1_enable_inbound")]
pub tier1_enable_inbound: bool,

/// See `near_network::config::Tier1::enable_outbound`.
#[serde(default = "default_tier1_enable_outbound")]
pub tier1_enable_outbound: bool,

/// See `near_network::config::Tier1::connect_interval`.
#[serde(default = "default_tier1_connect_interval")]
pub tier1_connect_interval: Duration,

/// See `near_network::config::Tier1::new_connections_per_attempt`.
#[serde(default = "default_tier1_new_connections_per_attempt")]
pub tier1_new_connections_per_attempt: u64,

/// See `NetworkConfig`.
/// Fields set here will override the NetworkConfig fields.
#[serde(default)]
pub network_config_overrides: NetworkConfigOverrides,
}

Expand All @@ -277,11 +249,11 @@ impl Default for ExperimentalConfig {
ExperimentalConfig {
inbound_disabled: false,
connect_only_to_boot_nodes: false,
skip_sending_tombstones_seconds: default_skip_tombstones(),
tier1_enable_inbound: default_tier1_enable_inbound(),
tier1_enable_outbound: default_tier1_enable_outbound(),
tier1_connect_interval: default_tier1_connect_interval(),
tier1_new_connections_per_attempt: default_tier1_new_connections_per_attempt(),
skip_sending_tombstones_seconds: 0,
tier1_enable_inbound: true,
tier1_enable_outbound: true,
tier1_connect_interval: Duration::from_secs(60),
tier1_new_connections_per_attempt: 50,
network_config_overrides: Default::default(),
}
}
Expand Down
17 changes: 2 additions & 15 deletions core/chain-configs/src/client_config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,19 +30,17 @@ pub const DEFAULT_STATE_SYNC_NUM_CONCURRENT_REQUESTS_ON_CATCHUP_EXTERNAL: u32 =

/// Configuration for garbage collection.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, PartialEq)]
#[serde(default)]
pub struct GCConfig {
/// Maximum number of blocks to garbage collect at every garbage collection
/// call.
#[serde(default = "default_gc_blocks_limit")]
pub gc_blocks_limit: NumBlocks,

/// Maximum number of height to go through at each garbage collection step
/// when cleaning forks during garbage collection.
#[serde(default = "default_gc_fork_clean_step")]
pub gc_fork_clean_step: u64,

/// Number of epochs for which we keep store data.
#[serde(default = "default_gc_num_epochs_to_keep")]
pub gc_num_epochs_to_keep: u64,
}

Expand All @@ -56,18 +54,6 @@ impl Default for GCConfig {
}
}

fn default_gc_blocks_limit() -> NumBlocks {
GCConfig::default().gc_blocks_limit
}

fn default_gc_fork_clean_step() -> u64 {
GCConfig::default().gc_fork_clean_step
}

fn default_gc_num_epochs_to_keep() -> u64 {
GCConfig::default().gc_num_epochs_to_keep()
}

impl GCConfig {
pub fn gc_num_epochs_to_keep(&self) -> u64 {
max(MIN_GC_NUM_EPOCHS_TO_KEEP, self.gc_num_epochs_to_keep)
Expand Down Expand Up @@ -163,6 +149,7 @@ impl SyncConfig {
}

#[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
#[serde(default)]
pub struct StateSplitConfig {
/// The soft limit on the size of a single batch. The batch size can be
/// decreased if resharding is consuming too many resources and interfering
Expand Down
7 changes: 2 additions & 5 deletions core/primitives/src/shard_layout.rs
Original file line number Diff line number Diff line change
Expand Up @@ -160,10 +160,7 @@ impl ShardLayout {
/// This is work in progress and the exact way of splitting is yet to be determined.
pub fn get_simple_nightshade_layout_v2() -> ShardLayout {
ShardLayout::v1(
// TODO(resharding) - find the right boundary to split shards in
// place of just "sweat". Likely somewhere in between near.social
// and sweatcoin.
vec!["aurora", "aurora-0", "kkuuue2akv_1630967379.near", "sweat"]
vec!["aurora", "aurora-0", "kkuuue2akv_1630967379.near", "tge-lockup.sweat"]
.into_iter()
.map(|s| s.parse().unwrap())
.collect(),
Expand Down Expand Up @@ -611,7 +608,7 @@ mod tests {
"aurora",
"aurora-0",
"kkuuue2akv_1630967379.near",
"sweat"
"tge-lockup.sweat"
],
"shards_split_map": [
[
Expand Down
2 changes: 1 addition & 1 deletion core/store/src/db/colddb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ impl ColdDB {
// Checks if the column is is the cold db and returns an error if not.
fn check_is_in_colddb(col: DBCol) -> std::io::Result<()> {
if !col.is_in_colddb() {
return Err(std::io::Error::new(std::io::ErrorKind::Other, Self::err_msg(col)));
return Err(std::io::Error::other(Self::err_msg(col)));
}
Ok(())
}
Expand Down
24 changes: 8 additions & 16 deletions core/store/src/db/rocksdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ impl RocksDB {
columns: &[DBCol],
) -> io::Result<Self> {
let counter = instance_tracker::InstanceTracker::try_new(store_config.max_open_files)
.map_err(other_error)?;
.map_err(io::Error::other)?;
let (db, db_opt) = Self::open_db(path, store_config, mode, temp, columns)?;
let cf_handles = Self::get_cf_handles(&db, columns);
Ok(Self { db, db_opt, cf_handles, _instance_tracker: counter })
Expand Down Expand Up @@ -144,7 +144,7 @@ impl RocksDB {
} else {
DB::open_cf_descriptors(&options, path, cf_descriptors)
}
.map_err(into_other)?;
.map_err(io::Error::other)?;
if cfg!(feature = "single_thread_rocksdb") {
// These have to be set after open db
let mut env = Env::new().unwrap();
Expand Down Expand Up @@ -200,7 +200,7 @@ impl RocksDB {
} else if cfg!(debug_assertions) {
panic!("The database instance isn’t setup to access {col}");
} else {
Err(other_error(format!("{col}: no such column")))
Err(io::Error::other(format!("{col}: no such column")))
}
}

Expand Down Expand Up @@ -269,7 +269,7 @@ impl<'a> Iterator for RocksDBIterator<'a> {
type Item = io::Result<(Box<[u8]>, Box<[u8]>)>;

fn next(&mut self) -> Option<Self::Item> {
Some(self.0.next()?.map_err(into_other))
Some(self.0.next()?.map_err(io::Error::other))
}
}

Expand Down Expand Up @@ -314,7 +314,7 @@ impl Database for RocksDB {
let result = self
.db
.get_pinned_cf_opt(self.cf_handle(col)?, key, &read_options)
.map_err(into_other)?
.map_err(io::Error::other)?
.map(DBSlice::from_rocksdb_slice);
timer.observe_duration();
Ok(result)
Expand Down Expand Up @@ -366,7 +366,7 @@ impl Database for RocksDB {
}
DBOp::DeleteAll { col } => {
let cf_handle = self.cf_handle(col)?;
let range = self.get_cf_key_range(cf_handle).map_err(into_other)?;
let range = self.get_cf_key_range(cf_handle).map_err(io::Error::other)?;
if let Some(range) = range {
batch.delete_range_cf(cf_handle, range.start(), range.end());
// delete_range_cf deletes ["begin_key", "end_key"), so need one more delete
Expand All @@ -378,7 +378,7 @@ impl Database for RocksDB {
}
}
}
self.db.write(batch).map_err(into_other)
self.db.write(batch).map_err(io::Error::other)
}

fn compact(&self) -> io::Result<()> {
Expand All @@ -392,7 +392,7 @@ impl Database for RocksDB {
// Need to iterator over all CFs because the normal `flush()` only
// flushes the default column family.
for col in DBCol::iter() {
self.db.flush_cf(self.cf_handle(col)?).map_err(into_other)?;
self.db.flush_cf(self.cf_handle(col)?).map_err(io::Error::other)?;
}
Ok(())
}
Expand Down Expand Up @@ -640,14 +640,6 @@ fn parse_statistics(
Ok(())
}

fn other_error(msg: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, msg)
}

fn into_other(error: rocksdb::Error) -> io::Error {
io::Error::new(io::ErrorKind::Other, error.into_string())
}

/// Returns name of a RocksDB column family corresponding to given column.
///
/// Historically we used `col##` names (with `##` being index of the column).
Expand Down
8 changes: 2 additions & 6 deletions core/store/src/db/rocksdb/instance_tracker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -205,17 +205,13 @@ impl NoFile for RealNoFile {

#[test]
fn test_ensure_max_open_files_limit() {
fn other_error(msg: &str) -> std::io::Error {
super::other_error(msg.to_string())
}

/// Mock implementation of NoFile interface.
struct MockNoFile<'a>(&'a mut (u64, u64));

impl<'a> NoFile for MockNoFile<'a> {
fn get(&self) -> std::io::Result<(u64, u64)> {
if self.0 .0 == 666 {
Err(other_error("error"))
Err(std::io::ErrorKind::Other.into())
} else {
Ok(*self.0)
}
Expand All @@ -224,7 +220,7 @@ fn test_ensure_max_open_files_limit() {
fn set(&mut self, soft: u64, hard: u64) -> std::io::Result<()> {
let (old_soft, old_hard) = self.get().unwrap();
if old_hard == 666000 {
Err(other_error("error"))
Err(std::io::ErrorKind::Other.into())
} else {
assert!(soft != old_soft, "Pointless call to set");
*self.0 = (soft, hard);
Expand Down
4 changes: 2 additions & 2 deletions core/store/src/db/rocksdb/snapshot.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ impl std::convert::From<io::Error> for SnapshotError {

/// Converts a RocksDB error into a `SnapshotError` by first wrapping it
/// in an `io::Error` (post-commit form using `io::Error::other`).
impl std::convert::From<::rocksdb::Error> for SnapshotError {
    fn from(err: ::rocksdb::Error) -> Self {
        io::Error::other(err).into()
    }
}

Expand Down Expand Up @@ -94,7 +94,7 @@ impl Snapshot {
}

let db = super::RocksDB::open(db_path, config, crate::Mode::ReadWriteExisting, temp)?;
let cp = Checkpoint::new(&db.db).map_err(super::into_other)?;
let cp = Checkpoint::new(&db.db).map_err(io::Error::other)?;
cp.create_checkpoint(&snapshot_path)?;

Ok(Self(Some(snapshot_path)))
Expand Down
12 changes: 4 additions & 8 deletions core/store/src/flat/store_helper.rs
Original file line number Diff line number Diff line change
Expand Up @@ -139,17 +139,13 @@ pub fn encode_flat_state_db_key(shard_uid: ShardUId, key: &[u8]) -> Vec<u8> {

pub fn decode_flat_state_db_key(key: &[u8]) -> io::Result<(ShardUId, Vec<u8>)> {
if key.len() < 8 {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("expected FlatState key length to be at least 8: {key:?}"),
));
return Err(io::Error::other(format!(
"expected FlatState key length to be at least 8: {key:?}"
)));
}
let (shard_uid_bytes, trie_key) = key.split_at(8);
let shard_uid = shard_uid_bytes.try_into().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("failed to decode shard_uid as part of FlatState key: {err}"),
)
io::Error::other(format!("failed to decode shard_uid as part of FlatState key: {err}"))
})?;
Ok((shard_uid, trie_key.to_vec()))
}
Expand Down
Loading

0 comments on commit e7b23c0

Please sign in to comment.