diff --git a/.sqlx/query-ca516d5a6f16e7877c129ec68edfb8eb4358479355cc4fe0a85ad1ea8d6bd014.json b/.sqlx/query-ca516d5a6f16e7877c129ec68edfb8eb4358479355cc4fe0a85ad1ea8d6bd014.json new file mode 100644 index 00000000..9ca9b604 --- /dev/null +++ b/.sqlx/query-ca516d5a6f16e7877c129ec68edfb8eb4358479355cc4fe0a85ad1ea8d6bd014.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT MAX(end_height) AS latest_bundled_height FROM bundles", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "latest_bundled_height", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + null + ] + }, + "hash": "ca516d5a6f16e7877c129ec68edfb8eb4358479355cc4fe0a85ad1ea8d6bd014" +} diff --git a/.sqlx/query-126284fed623566f0551d4e6a343ddbd8800dd6c27165f89fc72970fe8a89147.json b/.sqlx/query-ddc1a18d0d257b9065830b46a10ce42fee96b0925eb2c30a0b98cf9f79c6ed76.json similarity index 58% rename from .sqlx/query-126284fed623566f0551d4e6a343ddbd8800dd6c27165f89fc72970fe8a89147.json rename to .sqlx/query-ddc1a18d0d257b9065830b46a10ce42fee96b0925eb2c30a0b98cf9f79c6ed76.json index 0b8b6451..86a298a0 100644 --- a/.sqlx/query-126284fed623566f0551d4e6a343ddbd8800dd6c27165f89fc72970fe8a89147.json +++ b/.sqlx/query-ddc1a18d0d257b9065830b46a10ce42fee96b0925eb2c30a0b98cf9f79c6ed76.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT f.*\n FROM l1_fragments f\n JOIN l1_transaction_fragments tf ON tf.fragment_id = f.id\n JOIN l1_blob_transaction t ON t.id = tf.transaction_id\n WHERE t.hash = $1\n ", + "query": "\n SELECT\n f.*,\n b.start_height\n FROM l1_fragments f\n JOIN l1_transaction_fragments tf ON tf.fragment_id = f.id\n JOIN l1_blob_transaction t ON t.id = tf.transaction_id\n JOIN bundles b ON b.id = f.bundle_id\n WHERE t.hash = $1\n ", "describe": { "columns": [ { @@ -32,6 +32,11 @@ "ordinal": 5, "name": "bundle_id", "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "start_height", + "type_info": "Int8" } ], "parameters": { @@ -45,8 
+50,9 @@ false, false, false, + false, false ] }, - "hash": "126284fed623566f0551d4e6a343ddbd8800dd6c27165f89fc72970fe8a89147" + "hash": "ddc1a18d0d257b9065830b46a10ce42fee96b0925eb2c30a0b98cf9f79c6ed76" } diff --git a/.sqlx/query-11c3dc9c06523c39e928bfc1c2947309b2f92155b5d2198e39b42f687cc58f40.json b/.sqlx/query-ed56ffeb0264867943f7891de21ff99a2bfb27dd1e51d0f877f939e29b7f3a52.json similarity index 51% rename from .sqlx/query-11c3dc9c06523c39e928bfc1c2947309b2f92155b5d2198e39b42f687cc58f40.json rename to .sqlx/query-ed56ffeb0264867943f7891de21ff99a2bfb27dd1e51d0f877f939e29b7f3a52.json index 2fd79840..9fe76b57 100644 --- a/.sqlx/query-11c3dc9c06523c39e928bfc1c2947309b2f92155b5d2198e39b42f687cc58f40.json +++ b/.sqlx/query-ed56ffeb0264867943f7891de21ff99a2bfb27dd1e51d0f877f939e29b7f3a52.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT\n sub.id,\n sub.idx,\n sub.bundle_id,\n sub.data,\n sub.unused_bytes,\n sub.total_bytes\n FROM (\n SELECT DISTINCT ON (f.id)\n f.*,\n b.start_height\n FROM l1_fragments f\n JOIN bundles b ON b.id = f.bundle_id\n WHERE\n b.end_height >= $2\n AND NOT EXISTS (\n SELECT 1\n FROM l1_transaction_fragments tf\n JOIN l1_blob_transaction t ON t.id = tf.transaction_id\n WHERE tf.fragment_id = f.id\n AND t.state <> $1\n )\n ORDER BY\n f.id,\n b.start_height ASC,\n f.idx ASC\n ) AS sub\n ORDER BY\n sub.start_height ASC,\n sub.idx ASC\n LIMIT $3;\n", + "query": "SELECT\n sub.id,\n sub.idx,\n sub.bundle_id,\n sub.data,\n sub.unused_bytes,\n sub.total_bytes,\n sub.start_height\n FROM (\n SELECT DISTINCT ON (f.id)\n f.*,\n b.start_height\n FROM l1_fragments f\n JOIN bundles b ON b.id = f.bundle_id\n WHERE\n b.end_height >= $2\n AND NOT EXISTS (\n SELECT 1\n FROM l1_transaction_fragments tf\n JOIN l1_blob_transaction t ON t.id = tf.transaction_id\n WHERE tf.fragment_id = f.id\n AND t.state <> $1\n )\n ORDER BY\n f.id,\n b.start_height ASC,\n f.idx ASC\n ) AS sub\n ORDER BY\n sub.start_height ASC,\n sub.idx ASC\n LIMIT $3;\n", 
"describe": { "columns": [ { @@ -32,6 +32,11 @@ "ordinal": 5, "name": "total_bytes", "type_info": "Int8" + }, + { + "ordinal": 6, + "name": "start_height", + "type_info": "Int8" } ], "parameters": { @@ -47,8 +52,9 @@ false, false, false, + false, false ] }, - "hash": "11c3dc9c06523c39e928bfc1c2947309b2f92155b5d2198e39b42f687cc58f40" + "hash": "ed56ffeb0264867943f7891de21ff99a2bfb27dd1e51d0f877f939e29b7f3a52" } diff --git a/Cargo.lock b/Cargo.lock index 6b99e671..83f8ee14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2673,6 +2673,7 @@ dependencies = [ "serde", "serde_json", "services", + "static_assertions", "test-case", "thiserror 1.0.69", "tokio", @@ -2725,6 +2726,25 @@ dependencies = [ "bytes", ] +[[package]] +name = "fee_algo_simulation" +version = "0.10.5" +dependencies = [ + "actix-web", + "anyhow", + "eth", + "futures", + "itertools 0.13.0", + "serde", + "serde_json", + "services", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-subscriber", + "xdg", +] + [[package]] name = "ff" version = "0.13.0" @@ -4251,6 +4271,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "md-5" version = "0.10.6" @@ -4601,7 +4630,7 @@ checksum = "914a1c2265c98e2446911282c6ac86d8524f495792c38c5bd884f80499c7538a" dependencies = [ "parse-display-derive", "regex", - "regex-syntax", + "regex-syntax 0.8.5", ] [[package]] @@ -4613,7 +4642,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "regex-syntax", + "regex-syntax 0.8.5", "structmeta", "syn 2.0.90", ] @@ -4896,7 +4925,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", 
"unarray", @@ -5079,8 +5108,17 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", ] [[package]] @@ -5091,7 +5129,7 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.8.5", ] [[package]] @@ -5100,6 +5138,12 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.5" @@ -5761,7 +5805,6 @@ dependencies = [ "async-trait", "bytesize", "clock", - "delegate", "eth", "fuel-block-committer-encoding", "fuel-core-client", @@ -6826,10 +6869,14 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ + "matchers", + "once_cell", + "regex", "serde", "serde_json", "sharded-slab", "thread_local", + "tracing", "tracing-core", "tracing-serde", ] @@ -7460,6 +7507,12 @@ dependencies = [ "tap", ] +[[package]] +name = "xdg" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" + [[package]] name = "xmlparser" version = "0.13.6" diff --git a/Cargo.toml b/Cargo.toml index edb584d5..7c45ddfb 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ "committer", "e2e", + "fee_algo_simulation", "packages/adapters/clock", "packages/adapters/eth", "packages/adapters/fuel", @@ -38,6 +39,7 @@ actix-web = { version = "4", default-features = false } bitvec = { version = "1.0", default-features = false } bytesize = { version = "1.3", default-features = false } alloy = { version = "0.3.6", default-features = false } +xdg = { version = "2.5", default-features = false } proptest = { version = "1.0", default-features = false } rayon = { version = "1.10", default-features = false } num_cpus = { version = "1.16", default-features = false } diff --git a/README.md b/README.md index d3574523..ceffbe88 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,22 @@ The Fuel Block Committer is a standalone service dedicated to uploading Fuel Block metadata to Ethereum. +## Table of Contents + +- [Building](#building) +- [Testing](#testing) +- [Schema Visualization](#schema-visualization) + - [Generating Schema Diagrams](#generating-schema-diagrams) +- [Configuration](#configuration) + - [Environment Variables](#environment-variables) + - [Ethereum (ETH) Configuration](#ethereum-eth-configuration) + - [Fuel Configuration](#fuel-configuration) + - [Database (DB) Configuration](#database-db-configuration) + - [Application (App) Configuration](#application-app-configuration) + - [Bundle Configuration](#bundle-configuration) + - [Configuration Validation](#configuration-validation) +- [Running the Fee Algo Simulator](#running-the-fee-algo-simulator) + ## Building Building the project doesn't require any special steps beyond the standard Cargo build process. 
@@ -15,8 +31,8 @@ cargo build To run the end-to-end (e2e) tests, you need to have the following installed and available in your `PATH`: - [Foundry](https://github.com/foundry-rs/foundry) -- [fuel-core](https://github.com/FuelLabs/fuelup) (can be installed via [fuelup](https://github.com/FuelLabs/fuelup)) -- fuel-block-committer +- [Fuel Core](https://github.com/FuelLabs/fuelup) (can be installed via [fuelup](https://github.com/FuelLabs/fuelup)) +- `fuel-block-committer` binary You can also use `run_tests.sh`, which takes care of building the `fuel-block-committer` binary and making it available on `PATH` prior to running the e2e tests. @@ -51,7 +67,7 @@ The Fuel Block Committer is configured primarily through environment variables. - **`COMMITTER__ETH__L1_KEYS__MAIN`** - - **Description:** The Ethereum key authorized by the L1 fuel chain state contract to post block commitments. + - **Description:** The Ethereum key authorized by the L1 Fuel chain state contract to post block commitments. - **Format:** `Kms()` or `Private()` - **Example:** `Kms(arn:aws:kms:us-east-1:123456789012:key/abcd-1234)` @@ -78,7 +94,8 @@ The Fuel Block Committer is configured primarily through environment variables. - **Example:** `http://localhost:4000/graphql` - **`COMMITTER__FUEL__NUM_BUFFERED_REQUESTS`** - - **Description:** Number of concurrent http requests towards the fuel node. + + - **Description:** Number of concurrent HTTP requests towards the Fuel node. - **Type:** Positive integer - **Example:** `5` @@ -117,6 +134,7 @@ The Fuel Block Committer is configured primarily through environment variables. - **Example:** `10` - **`COMMITTER__APP__DB__USE_SSL`** + - **Description:** Whether to use SSL when connecting to the PostgreSQL server. - **Type:** `bool` - **Values:** `true` or `false` @@ -137,7 +155,7 @@ The Fuel Block Committer is configured primarily through environment variables. 
- **`COMMITTER__APP__BLOCK_CHECK_INTERVAL`** - - **Description:** How often to check for new fuel blocks. + - **Description:** How often to check for new Fuel blocks. - **Format:** Human-readable duration (e.g., `5s`, `1m`) - **Example:** `5s` @@ -147,6 +165,12 @@ The Fuel Block Committer is configured primarily through environment variables. - **Format:** Human-readable duration - **Example:** `5s` +- **`COMMITTER__APP__L1_FEE_CHECK_INTERVAL`** + + - **Description:** How often to check for L1 fees. + - **Format:** Human-readable duration + - **Example:** `10s` + - **`COMMITTER__APP__NUM_BLOCKS_TO_FINALIZE_TX`** - **Description:** Number of L1 blocks that need to pass to accept the transaction as finalized. @@ -166,22 +190,70 @@ The Fuel Block Committer is configured primarily through environment variables. - **Example:** `4000000000000000` - **`COMMITTER__APP__SEND_TX_REQUEST_TIMEOUT`** + - **Description:** Duration for timeout when sending transaction requests. - **Format:** Human-readable duration - **Example:** `10s` +- **`COMMITTER__APP__STATE_PRUNER_RETENTION`** + + - **Description:** Retention duration for state pruner. + - **Format:** Human-readable duration + - **Example:** `1h` + +- **`COMMITTER__APP__STATE_PRUNER_RUN_INTERVAL`** + + - **Description:** How often to run the state pruner. + - **Format:** Human-readable duration + - **Example:** `30m` + +- **`COMMITTER__APP__FEE_ALGO_SHORT_SMA_BLOCKS`** + + - **Description:** Short-term period for the fee algo in block numbers. + - **Type:** Positive integer (`NonZeroU64`) + - **Example:** `25` + +- **`COMMITTER__APP__FEE_ALGO_LONG_SMA_BLOCKS`** + + - **Description:** Long-term period for the fee algo in block numbers. + - **Type:** Positive integer (`NonZeroU64`) + - **Example:** `300` + +- **`COMMITTER__APP__FEE_ALGO_MAX_L2_BLOCKS_BEHIND`** + + - **Description:** Maximum number of unposted L2 blocks before sending a transaction regardless of fees. 
+ - **Type:** Positive integer (`NonZeroU32`) + - **Example:** `28800` + +- **`COMMITTER__APP__FEE_ALGO_START_MAX_FEE_MULTIPLIER`** + + - **Description:** Starting multiplier applied when we're 0 L2 blocks behind. + - **Type:** `f64` + - **Example:** `0.800000` + +- **`COMMITTER__APP__FEE_ALGO_END_MAX_FEE_MULTIPLIER`** + + - **Description:** Ending multiplier applied if we're `max_l2_blocks_behind - 1` blocks behind. + - **Type:** `f64` + - **Example:** `1.200000` + +- **`COMMITTER__APP__FEE_ALGO_ALWAYS_ACCEPTABLE_FEE`** + - **Description:** A fee that is always acceptable regardless of other conditions. + - **Type:** `u64` + - **Example:** `1000000000000000` + #### Bundle Configuration - **`COMMITTER__APP__BUNDLE__ACCUMULATION_TIMEOUT`** - - **Description:** Duration to wait for additional fuel blocks before initiating the bundling process. + - **Description:** Duration to wait for additional Fuel blocks before initiating the bundling process. - **Format:** Human-readable duration - **Example:** `30s` - **`COMMITTER__APP__BUNDLE__BLOCKS_TO_ACCUMULATE`** - - **Description:** Number of fuel blocks to accumulate before initiating the bundling process. - - **Type:** Positive integer + - **Description:** Number of Fuel blocks to accumulate before initiating the bundling process. + - **Type:** Positive integer (`NonZeroUsize`) - **Example:** `5` - **`COMMITTER__APP__BUNDLE__OPTIMIZATION_TIMEOUT`** @@ -190,12 +262,6 @@ The Fuel Block Committer is configured primarily through environment variables. - **Format:** Human-readable duration - **Example:** `60s` -- **`COMMITTER__APP__BUNDLE__COMPRESSION_LEVEL`** - - - **Description:** Compression level used for compressing block data before submission. - - **Values:** `"disabled"`, `"min"`, `"level1"`..`"level9"`, `"max"` - - **Example:** `"min"` - - **`COMMITTER__APP__BUNDLE__OPTIMIZATION_STEP`** - **Description:** Size of the optimization step at the start of the optimization process. 
@@ -215,10 +281,17 @@ The Fuel Block Committer is configured primarily through environment variables. - **Example:** `30s` - **`COMMITTER__APP__BUNDLE__NEW_BUNDLE_CHECK_INTERVAL`** + - **Description:** Duration to wait before checking if a new bundle can be made. - **Format:** Human-readable duration - **Example:** `15s` +- **`COMMITTER__APP__BUNDLE__COMPRESSION_LEVEL`** + + - **Description:** Compression level used for compressing block data before submission. + - **Values:** `"disabled"`, `"min"`, `"level1"`..`"level9"`, `"max"` + - **Example:** `"min"` + ### Configuration Validation The committer performs validation on the provided configuration to ensure consistency and correctness. For example: @@ -226,5 +299,18 @@ The committer performs validation on the provided configuration to ensure consis - **Wallet Keys:** The main wallet key and blob pool wallet key must be different. - **Fragments to Accumulate:** Must be less than or equal to 6. - **Block Height Lookback:** Must be greater than or equal to the number of blocks to accumulate. +- **Fee Multiplier Range:** Must have valid start and end multipliers. If any validation fails, the committer will return an error, preventing it from running with invalid settings. + +## Running the Fee Algo Simulator + +The **Fee Algo Simulator** is a separate binary designed to simulate and analyze fee algorithms using data from eth mainnet. 
+ +### Running the Simulator + +To run the Fee Algo Simulator, execute the following command: + +```shell +cargo run --release --bin fee_algo_simulation +``` diff --git a/committer/src/config.rs b/committer/src/config.rs index 54de1294..859ff328 100644 --- a/committer/src/config.rs +++ b/committer/src/config.rs @@ -1,6 +1,6 @@ use std::{ net::Ipv4Addr, - num::{NonZeroU32, NonZeroUsize}, + num::{NonZeroU32, NonZeroU64, NonZeroUsize}, str::FromStr, time::Duration, }; @@ -9,6 +9,7 @@ use clap::{command, Parser}; use eth::{Address, L1Keys}; use fuel_block_committer_encoding::bundle::CompressionLevel; use serde::Deserialize; +use services::state_committer::{AlgoConfig, FeeMultiplierRange, FeeThresholds, SmaPeriods}; use storage::DbConfig; use url::Url; @@ -20,6 +21,11 @@ pub struct Config { } impl Config { + pub fn fee_algo_config(&self) -> AlgoConfig { + self.validated_fee_algo_config() + .expect("already validated via `validate` in main") + } + pub fn validate(&self) -> crate::errors::Result<()> { let keys = &self.eth.l1_keys; if keys @@ -45,8 +51,33 @@ impl Config { )); } + if let Err(e) = self.validated_fee_algo_config() { + return Err(crate::errors::Error::Other(format!( + "Invalid fee algo config: {e}", + ))); + } + Ok(()) } + + fn validated_fee_algo_config(&self) -> crate::errors::Result { + let config = self; + let algo_config = services::state_committer::AlgoConfig { + sma_periods: SmaPeriods { + short: config.app.fee_algo.short_sma_blocks, + long: config.app.fee_algo.long_sma_blocks, + }, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: config.app.fee_algo.max_l2_blocks_behind, + multiplier_range: FeeMultiplierRange::new( + config.app.fee_algo.start_max_fee_multiplier, + config.app.fee_algo.end_max_fee_multiplier, + )?, + always_acceptable_fee: config.app.fee_algo.always_acceptable_fee as u128, + }, + }; + Ok(algo_config) + } } #[derive(Debug, Clone, Deserialize)] @@ -94,6 +125,9 @@ pub struct App { /// How often to check for finalized l1 txs 
#[serde(deserialize_with = "human_readable_duration")] pub tx_finalization_check_interval: Duration, + /// How often to check for l1 fees + #[serde(deserialize_with = "human_readable_duration")] + pub l1_fee_check_interval: Duration, /// Number of L1 blocks that need to pass to accept the tx as finalized pub num_blocks_to_finalize_tx: u64, /// Interval after which to bump a pending tx @@ -112,6 +146,30 @@ pub struct App { /// How often to run state pruner #[serde(deserialize_with = "human_readable_duration")] pub state_pruner_run_interval: Duration, + /// Configuration for the fee algorithm used by the StateCommitter + pub fee_algo: FeeAlgoConfig, +} + +/// Configuration for the fee algorithm used by the StateCommitter +#[derive(Debug, Clone, Deserialize)] +pub struct FeeAlgoConfig { + /// Short-term period for Simple Moving Average (SMA) in block numbers + pub short_sma_blocks: NonZeroU64, + + /// Long-term period for Simple Moving Average (SMA) in block numbers + pub long_sma_blocks: NonZeroU64, + + /// Maximum number of unposted L2 blocks before sending a transaction regardless of fees + pub max_l2_blocks_behind: NonZeroU32, + + /// Starting multiplier applied when we're 0 l2 blocks behind + pub start_max_fee_multiplier: f64, + + /// Ending multiplier applied if we're max_l2_blocks_behind - 1 blocks behind + pub end_max_fee_multiplier: f64, + + /// A fee that is always acceptable regardless of other conditions + pub always_acceptable_fee: u64, } /// Configuration settings for managing fuel block bundling and fragment submission operations. 
@@ -205,15 +263,19 @@ pub struct Internal { pub eth_errors_before_unhealthy: usize, pub balance_update_interval: Duration, pub cost_request_limit: usize, + pub l1_blocks_cached_for_fee_metrics_tracker: usize, } impl Default for Internal { fn default() -> Self { + const ETH_BLOCK_TIME: usize = 12; + const ETH_BLOCKS_PER_DAY: usize = 24 * 3600 / ETH_BLOCK_TIME; Self { fuel_errors_before_unhealthy: 3, eth_errors_before_unhealthy: 3, balance_update_interval: Duration::from_secs(10), cost_request_limit: 1000, + l1_blocks_cached_for_fee_metrics_tracker: ETH_BLOCKS_PER_DAY, } } } diff --git a/committer/src/main.rs b/committer/src/main.rs index 9b9bf2d6..a56241e7 100644 --- a/committer/src/main.rs +++ b/committer/src/main.rs @@ -7,6 +7,7 @@ mod setup; use api::launch_api_server; use errors::{Result, WithContext}; use metrics::prometheus::Registry; +use services::fees::cache::CachingApi; use setup::last_finalization_metric; use tokio_util::sync::CancellationToken; @@ -72,13 +73,27 @@ async fn main() -> Result<()> { &metrics_registry, ); + let fee_api = CachingApi::new( + ethereum_rpc.clone(), + internal_config.l1_blocks_cached_for_fee_metrics_tracker, + ); + + let fee_metrics_updater_handle = setup::fee_metrics_tracker( + fee_api.clone(), + cancel_token.clone(), + &config, + &metrics_registry, + )?; + let state_committer_handle = setup::state_committer( fuel_adapter.clone(), ethereum_rpc.clone(), storage.clone(), cancel_token.clone(), &config, - ); + &metrics_registry, + fee_api, + )?; let state_importer_handle = setup::block_importer(fuel_adapter, storage.clone(), cancel_token.clone(), &config); @@ -93,7 +108,7 @@ async fn main() -> Result<()> { ); // Enable pruner once the issue is resolved - //TODO: https://github.com/FuelLabs/fuel-block-committer/issues/173 + // TODO: https://github.com/FuelLabs/fuel-block-committer/issues/173 // let state_pruner_handle = setup::state_pruner( // storage.clone(), // cancel_token.clone(), @@ -105,8 +120,9 @@ async fn main() -> Result<()> 
{ handles.push(state_importer_handle); handles.push(block_bundler); handles.push(state_listener_handle); + handles.push(fee_metrics_updater_handle); // Enable pruner once the issue is resolved - //TODO: https://github.com/FuelLabs/fuel-block-committer/issues/173 + // TODO: https://github.com/FuelLabs/fuel-block-committer/issues/173 // handles.push(state_pruner_handle); } diff --git a/committer/src/setup.rs b/committer/src/setup.rs index db771797..db03fcde 100644 --- a/committer/src/setup.rs +++ b/committer/src/setup.rs @@ -9,6 +9,8 @@ use metrics::{ }; use services::{ block_committer::{port::l1::Contract, service::BlockCommitter}, + fee_metrics_tracker::service::FeeMetricsTracker, + fees::cache::CachingApi, state_committer::port::Storage, state_listener::service::StateListener, state_pruner::service::StatePruner, @@ -117,7 +119,9 @@ pub fn state_committer( storage: Database, cancel_token: CancellationToken, config: &config::Config, -) -> tokio::task::JoinHandle<()> { + registry: &Registry, + fee_api: CachingApi, +) -> Result> { let state_committer = services::StateCommitter::new( l1, fuel, @@ -127,17 +131,20 @@ pub fn state_committer( fragment_accumulation_timeout: config.app.bundle.fragment_accumulation_timeout, fragments_to_accumulate: config.app.bundle.fragments_to_accumulate, gas_bump_timeout: config.app.gas_bump_timeout, - tx_max_fee: config.app.tx_max_fee as u128, + fee_algo: config.fee_algo_config(), }, SystemClock, + fee_api, ); - schedule_polling( + state_committer.register_metrics(registry); + + Ok(schedule_polling( config.app.tx_finalization_check_interval, state_committer, "State Committer", cancel_token, - ) + )) } pub fn block_importer( @@ -316,3 +323,23 @@ pub async fn shut_down( storage.close().await; Ok(()) } + +pub fn fee_metrics_tracker( + api: CachingApi, + cancel_token: CancellationToken, + config: &config::Config, + registry: &Registry, +) -> Result> { + let fee_metrics_tracker = FeeMetricsTracker::new(api); + + 
fee_metrics_tracker.register_metrics(registry); + + let handle = schedule_polling( + config.app.l1_fee_check_interval, + fee_metrics_tracker, + "Fee Tracker", + cancel_token, + ); + + Ok(handle) +} diff --git a/e2e/src/committer.rs b/e2e/src/committer.rs index f2323a83..3350278f 100644 --- a/e2e/src/committer.rs +++ b/e2e/src/committer.rs @@ -73,6 +73,7 @@ impl Committer { .env("COMMITTER__APP__HOST", "127.0.0.1") .env("COMMITTER__APP__BLOCK_CHECK_INTERVAL", "5s") .env("COMMITTER__APP__TX_FINALIZATION_CHECK_INTERVAL", "5s") + .env("COMMITTER__APP__L1_FEE_CHECK_INTERVAL", "5s") .env("COMMITTER__APP__NUM_BLOCKS_TO_FINALIZE_TX", "3") .env("COMMITTER__APP__GAS_BUMP_TIMEOUT", "300s") .env("COMMITTER__APP__TX_MAX_FEE", "4000000000000000") @@ -121,6 +122,16 @@ impl Committer { "COMMITTER__APP__STATE_PRUNER_RUN_INTERVAL", get_field!(state_pruner_run_interval), ) + .env("COMMITTER__APP__FEE_ALGO__SHORT_SMA_BLOCKS", "1") + .env("COMMITTER__APP__FEE_ALGO__LONG_SMA_BLOCKS", "1") + .env("COMMITTER__APP__FEE_ALGO__MAX_L2_BLOCKS_BEHIND", "1") + .env("COMMITTER__APP__FEE_ALGO__START_MAX_FEE_MULTIPLIER", "1.0") + .env("COMMITTER__APP__FEE_ALGO__END_MAX_FEE_MULTIPLIER", "1.0") + // we're basically disabling the fee algo here + .env( + "COMMITTER__APP__FEE_ALGO__ALWAYS_ACCEPTABLE_FEE", + u64::MAX.to_string(), + ) .current_dir(Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap()) .kill_on_drop(true); diff --git a/e2e/src/eth_node/state_contract.rs b/e2e/src/eth_node/state_contract.rs index 2d514e21..ca005865 100644 --- a/e2e/src/eth_node/state_contract.rs +++ b/e2e/src/eth_node/state_contract.rs @@ -10,7 +10,7 @@ use alloy::{ use eth::{AwsClient, AwsConfig, Signer, Signers, WebsocketClient}; use fs_extra::dir::{copy, CopyOptions}; use serde::Deserialize; -use services::{types::fuel::FuelBlock, types::Address}; +use services::types::{fuel::FuelBlock, Address}; use tokio::process::Command; use url::Url; diff --git a/e2e/src/lib.rs b/e2e/src/lib.rs index 94ee8bd9..4459a594 100644 --- 
a/e2e/src/lib.rs +++ b/e2e/src/lib.rs @@ -85,7 +85,7 @@ mod tests { } // Enable test once the issue is resolved - //TODO: https://github.com/FuelLabs/fuel-block-committer/issues/173 + // TODO: https://github.com/FuelLabs/fuel-block-committer/issues/173 #[ignore] #[tokio::test(flavor = "multi_thread")] async fn old_state_will_be_pruned() -> Result<()> { diff --git a/fee_algo_simulation/Cargo.toml b/fee_algo_simulation/Cargo.toml new file mode 100644 index 00000000..fef6a11a --- /dev/null +++ b/fee_algo_simulation/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "fee_algo_simulation" +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +repository = { workspace = true } +version = { workspace = true } +publish = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +futures = { workspace = true } +thiserror = { workspace = true } +actix-web = { workspace = true } +anyhow = { workspace = true } +eth = { workspace = true } +itertools = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +services = { workspace = true } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = [ + "fmt", + "std", + "env-filter", +] } +xdg = { workspace = true } diff --git a/fee_algo_simulation/src/handlers.rs b/fee_algo_simulation/src/handlers.rs new file mode 100644 index 00000000..dadc8259 --- /dev/null +++ b/fee_algo_simulation/src/handlers.rs @@ -0,0 +1,267 @@ +use std::time::Duration; + +use actix_web::{web, HttpResponse, Responder, ResponseError}; +use anyhow::Result; +use eth::HttpClient; +use services::{ + fee_metrics_tracker::service::calculate_blob_tx_fee, + fees::{cache::CachingApi, Api, FeesAtHeight, SequentialBlockFees}, + state_committer::{AlgoConfig, SmaFeeAlgo}, + types::{DateTime, Utc}, +}; +use thiserror::Error; +use 
tracing::{error, info}; + +use super::{ + models::{FeeDataPoint, FeeParams, FeeResponse, FeeStats}, + state::AppState, + utils::last_n_blocks, +}; + +#[derive(Error, Debug)] +pub enum FeeError { + #[error("Internal Server Error: {0}")] + InternalError(String), + + #[error("Bad Request: {0}")] + BadRequest(String), +} + +impl ResponseError for FeeError { + fn error_response(&self) -> HttpResponse { + match self { + FeeError::InternalError(message) => { + HttpResponse::InternalServerError().body(message.clone()) + } + FeeError::BadRequest(message) => HttpResponse::BadRequest().body(message.clone()), + } + } +} + +pub async fn index_html() -> impl Responder { + let contents = include_str!("index.html"); + HttpResponse::Ok() + .content_type("text/html; charset=utf-8") + .body(contents) +} + +struct FeeHandler { + state: web::Data, + params: FeeParams, + config: AlgoConfig, + seq_fees: SequentialBlockFees, + last_block_height: u64, + last_block_time: DateTime, + sma_algo: SmaFeeAlgo>, +} + +impl FeeHandler { + async fn new(state: web::Data, params: FeeParams) -> Result { + let ending_height = Self::resolve_ending_height(&state, ¶ms).await?; + let start_height = ending_height.saturating_sub(params.amount_of_blocks); + let config = Self::parse_config(¶ms)?; + let seq_fees = Self::fetch_fees(&state, start_height, ending_height).await?; + let last_block = Self::get_last_block_info(&state, &seq_fees).await?; + let sma_algo = SmaFeeAlgo::new(state.fee_api.clone(), config); + + Ok(Self { + state, + params, + config, + seq_fees, + last_block_height: last_block.0, + last_block_time: last_block.1, + sma_algo, + }) + } + + async fn get_fees_response(&self) -> Result { + let data = self.calculate_fee_data().await?; + let stats = self.calculate_statistics(&data); + Ok(FeeResponse { data, stats }) + } + + async fn resolve_ending_height( + state: &web::Data, + params: &FeeParams, + ) -> Result { + if let Some(val) = params.ending_height { + Ok(val) + } else { + 
state.fee_api.current_height().await.map_err(|e| { + error!("Error fetching current height: {:?}", e); + FeeError::InternalError("Failed to fetch current height".into()) + }) + } + } + + fn parse_config(params: &FeeParams) -> Result { + AlgoConfig::try_from(params.clone()).map_err(|e| { + error!("Error parsing config: {:?}", e); + FeeError::BadRequest("Invalid configuration parameters".into()) + }) + } + + async fn fetch_fees( + state: &web::Data, + start: u64, + end: u64, + ) -> Result { + state.fee_api.fees(start..=end).await.map_err(|e| { + error!("Error fetching sequential fees: {:?}", e); + FeeError::InternalError("Failed to fetch sequential fees".into()) + }) + } + + async fn get_last_block_info( + state: &web::Data, + seq_fees: &SequentialBlockFees, + ) -> Result<(u64, DateTime), FeeError> { + let last_block = seq_fees.last(); + let last_block_time = state + .fee_api + .inner() + .get_block_time(last_block.height) + .await + .map_err(|e| { + error!("Error fetching last block time: {:?}", e); + FeeError::InternalError("Failed to fetch last block time".into()) + })? 
+ .ok_or_else(|| { + error!("Last block time not found"); + FeeError::InternalError("Last block time not found".into()) + })?; + info!("Last block time: {}", last_block_time); + Ok((last_block.height, last_block_time)) + } + + async fn calculate_fee_data(&self) -> Result, FeeError> { + let mut data = Vec::with_capacity(self.seq_fees.len()); + + for block_fee in self.seq_fees.iter() { + let fee_data = self.process_block_fee(block_fee).await?; + data.push(fee_data); + } + + Ok(data) + } + + async fn process_block_fee(&self, block_fee: &FeesAtHeight) -> Result { + let current_fee_wei = calculate_blob_tx_fee(self.params.num_blobs, &block_fee.fees); + let short_fee_wei = self + .fetch_fee(block_fee.height, self.config.sma_periods.short) + .await?; + let long_fee_wei = self + .fetch_fee(block_fee.height, self.config.sma_periods.long) + .await?; + + let acceptable = self + .sma_algo + .fees_acceptable( + self.params.num_blobs, + self.params.num_l2_blocks_behind, + block_fee.height, + ) + .await + .map_err(|e| { + error!("Error determining fee acceptability: {:?}", e); + FeeError::InternalError("Failed to determine fee acceptability".into()) + })?; + + let block_gap = self.last_block_height - block_fee.height; + let block_time = self.last_block_time - Duration::from_secs(12 * block_gap); + + let convert = |wei| format!("{:.4}", (wei as f64) / 1e18); + + Ok(FeeDataPoint { + block_height: block_fee.height, + block_time: block_time.to_rfc3339(), + current_fee: convert(current_fee_wei), + short_fee: convert(short_fee_wei), + long_fee: convert(long_fee_wei), + acceptable, + }) + } + + async fn fetch_fee( + &self, + current_height: u64, + period: std::num::NonZeroU64, + ) -> Result { + let fees = self + .state + .fee_api + .fees(last_n_blocks(current_height, period)) + .await + .map_err(|e| { + error!("Error fetching fees for period: {:?}", e); + FeeError::InternalError("Failed to fetch fees".into()) + })? 
+ .mean(); + Ok(calculate_blob_tx_fee(self.params.num_blobs, &fees)) + } + + fn calculate_statistics(&self, data: &[FeeDataPoint]) -> FeeStats { + let total_blocks = data.len() as f64; + let acceptable_blocks = data.iter().filter(|d| d.acceptable).count() as f64; + let percentage_acceptable = if total_blocks > 0.0 { + (acceptable_blocks / total_blocks) * 100.0 + } else { + 0.0 + }; + + let gap_sizes = self.compute_gap_sizes(data); + let percentile_95_gap_size = Self::calculate_percentile(&gap_sizes, 0.95); + let longest_unacceptable_streak = gap_sizes.into_iter().max().unwrap_or(0); + + FeeStats { + percentage_acceptable, + percentile_95_gap_size, + longest_unacceptable_streak, + } + } + + fn compute_gap_sizes(&self, data: &[FeeDataPoint]) -> Vec { + let mut gap_sizes = Vec::new(); + let mut current_gap = 0; + + for d in data { + if !d.acceptable { + current_gap += 1; + } else if current_gap > 0 { + gap_sizes.push(current_gap); + current_gap = 0; + } + } + + if current_gap > 0 { + gap_sizes.push(current_gap); + } + + gap_sizes + } + + fn calculate_percentile(gaps: &[u64], percentile: f64) -> u64 { + if gaps.is_empty() { + return 0; + } + + let mut sorted_gaps = gaps.to_vec(); + sorted_gaps.sort_unstable(); + + let index = ((sorted_gaps.len() as f64) * percentile).ceil() as usize - 1; + sorted_gaps[index.min(sorted_gaps.len() - 1)] + } +} + +pub async fn get_fees(state: web::Data, params: web::Query) -> impl Responder { + let handler = match FeeHandler::new(state.clone(), params.into_inner()).await { + Ok(h) => h, + Err(e) => return e.error_response(), + }; + + match handler.get_fees_response().await { + Ok(response) => HttpResponse::Ok().json(response), + Err(e) => e.error_response(), + } +} diff --git a/fee_algo_simulation/src/index.html b/fee_algo_simulation/src/index.html new file mode 100644 index 00000000..f088b2ba --- /dev/null +++ b/fee_algo_simulation/src/index.html @@ -0,0 +1,406 @@ + + + + + Fee Algo Simulator + + + + + + + + +
+

Fee Algo Simulator

+ + +
+
+
+
+
+ + +
+
+ + +
+
+ + +
+
+ +
+
+ + +
+
+ + +
+
+ +
+
+ + +
e.g., 8 hours worth
+
+
+ + +
+
+ +
+
+ + +
Multiplier applied to long-term SMA fee at start (e.g., 0.8 = 80%)
+
+
+ + +
Multiplier applied to long-term SMA fee at end (e.g., 1.05 = 105%)
+
+
+ +
+
+ + +
Value in Wei (e.g., 1 ETH = 1,000,000,000,000,000,000 Wei)
+
+
+ + +
+ + + + + + +
+
+
+
+ + +
+
+
+
+
+ + +
+
+

Statistics

+
+
+ +
+
+ +
+
+ +
+
+
+
+ Loading... +
+

Loading...

+
+
+
+
+ + + + diff --git a/fee_algo_simulation/src/main.rs b/fee_algo_simulation/src/main.rs new file mode 100644 index 00000000..c3208426 --- /dev/null +++ b/fee_algo_simulation/src/main.rs @@ -0,0 +1,54 @@ +use std::net::SocketAddr; + +use actix_web::web::{self, Data}; +use anyhow::Result; +use services::fees::cache::CachingApi; +use tracing::level_filters::LevelFilter; +use tracing_subscriber::EnvFilter; + +mod handlers; +mod models; +mod state; +mod utils; + +#[tokio::main] +async fn main() -> Result<()> { + let filter = EnvFilter::builder() + .with_default_directive(LevelFilter::INFO.into()) + .from_env()? + .add_directive("services::state_committer::fee_algo=off".parse()?); + + tracing_subscriber::fmt() + .with_env_filter(filter) + .compact() + .init(); + + let client = eth::HttpClient::new(models::URL).unwrap(); + + let num_blocks_per_month = 30 * 24 * 3600 / 12; + + let caching_api = CachingApi::new(client, num_blocks_per_month * 2); + caching_api.import(utils::load_cache()).await; + + let state = state::AppState { + fee_api: caching_api.clone(), + }; + + let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); + + let server = actix_web::HttpServer::new(move || { + actix_web::App::new() + .app_data(Data::new(state.clone())) + .service(web::resource("/").route(web::get().to(handlers::index_html))) + .service(web::resource("/fees").route(web::get().to(handlers::get_fees))) + }) + .bind(addr)?; + + eprintln!("Server listening on http://{}", addr); + + server.run().await?; + + utils::save_cache(caching_api.export().await)?; + + Ok(()) +} diff --git a/fee_algo_simulation/src/models.rs b/fee_algo_simulation/src/models.rs new file mode 100644 index 00000000..bd1a5096 --- /dev/null +++ b/fee_algo_simulation/src/models.rs @@ -0,0 +1,102 @@ +use std::num::{NonZeroU32, NonZeroU64}; + +use anyhow::Context; +use serde::{Deserialize, Serialize}; +use services::{ + fees::FeesAtHeight, + state_committer::{AlgoConfig, FeeMultiplierRange, FeeThresholds, SmaPeriods}, +}; + +pub const 
URL: &str = "https://eth.llamarpc.com"; + +/// Structure for saving fees to cache. +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct SavedFees { + pub fees: Vec, +} + +/// Query parameters for the `/fees` endpoint. +#[derive(Clone, Debug, Deserialize)] +pub struct FeeParams { + pub ending_height: Option, + pub amount_of_blocks: u64, + pub short: u64, + pub long: u64, + pub max_l2_blocks_behind: u32, + pub start_max_fee_multiplier: f64, + pub end_max_fee_multiplier: f64, + pub always_acceptable_fee: String, + pub num_blobs: u32, + pub num_l2_blocks_behind: u32, +} + +impl TryFrom for AlgoConfig { + type Error = anyhow::Error; + + fn try_from(value: FeeParams) -> Result { + let always_acceptable_fee = value + .always_acceptable_fee + .parse() + .context("invalid always_acceptable_fee value")?; + + let short = NonZeroU64::new(value.short).context("short sma period must be non-zero")?; + let long = NonZeroU64::new(value.long).context("long sma period must be non-zero")?; + + let sma_periods = SmaPeriods { short, long }; + + let max_l2_blocks_behind = NonZeroU32::new(value.max_l2_blocks_behind) + .context("max_l2_blocks_behind must be non-zero")?; + + let multiplier_range = + FeeMultiplierRange::new(value.start_max_fee_multiplier, value.end_max_fee_multiplier)?; + + Ok(AlgoConfig { + sma_periods, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind, + multiplier_range, + always_acceptable_fee, + }, + }) + } +} + +/// Response struct for each fee data point. 
+#[derive(Debug, Serialize)] +pub struct FeeDataPoint { + #[serde(rename = "blockHeight")] + pub block_height: u64, + + #[serde(rename = "blockTime")] + pub block_time: String, // ISO 8601 format + + #[serde(rename = "currentFee")] + pub current_fee: String, // ETH with 4 decimal places + + #[serde(rename = "shortFee")] + pub short_fee: String, // ETH with 4 decimal places + + #[serde(rename = "longFee")] + pub long_fee: String, // ETH with 4 decimal places + + pub acceptable: bool, +} + +#[derive(Debug, Serialize)] +pub struct FeeStats { + #[serde(rename = "percentageAcceptable")] + pub percentage_acceptable: f64, // Percentage of acceptable blocks + + #[serde(rename = "percentile95GapSize")] + pub percentile_95_gap_size: u64, // 95th percentile of gap sizes in blocks + + #[serde(rename = "longestUnacceptableStreak")] + pub longest_unacceptable_streak: u64, // Longest consecutive unacceptable blocks +} + +/// Complete response struct. +#[derive(Debug, Serialize)] +pub struct FeeResponse { + pub data: Vec, + pub stats: FeeStats, +} diff --git a/fee_algo_simulation/src/state.rs b/fee_algo_simulation/src/state.rs new file mode 100644 index 00000000..3299e6b7 --- /dev/null +++ b/fee_algo_simulation/src/state.rs @@ -0,0 +1,7 @@ +use services::fees::cache::CachingApi; + +/// Shared state across routes. +#[derive(Clone)] +pub struct AppState { + pub fee_api: CachingApi, +} diff --git a/fee_algo_simulation/src/utils.rs b/fee_algo_simulation/src/utils.rs new file mode 100644 index 00000000..542da592 --- /dev/null +++ b/fee_algo_simulation/src/utils.rs @@ -0,0 +1,51 @@ +use std::{ops::RangeInclusive, path::PathBuf}; + +use services::fees::{Fees, FeesAtHeight}; +use tracing::{error, info}; +use xdg::BaseDirectories; + +use super::models::SavedFees; + +/// Path to the fee cache file. 
+pub fn fee_file() -> PathBuf { + let xdg = BaseDirectories::with_prefix("fee_simulation").unwrap(); + if let Some(cache) = xdg.find_cache_file("fee_cache.json") { + cache + } else { + xdg.place_data_file("fee_cache.json").unwrap() + } +} + +pub fn load_cache() -> Vec<(u64, Fees)> { + let contents = match std::fs::read_to_string(fee_file()) { + Ok(contents) => contents, + Err(e) => { + error!("Failed to read fee cache file: {e}"); + return vec![]; + } + }; + + let fees: SavedFees = serde_json::from_str(&contents) + .inspect_err(|e| error!("error while deserializing json cache!: {e}")) + .unwrap_or_default(); + + info!("loaded from cache: {} fees", fees.fees.len()); + + fees.fees.into_iter().map(|f| (f.height, f.fees)).collect() +} + +pub fn save_cache(cache: impl IntoIterator) -> anyhow::Result<()> { + let fees = SavedFees { + fees: cache + .into_iter() + .map(|(height, fees)| FeesAtHeight { height, fees }) + .collect(), + }; + std::fs::write(fee_file(), serde_json::to_string(&fees)?)?; + info!("saved to cache: {} fees", fees.fees.len()); + Ok(()) +} + +pub fn last_n_blocks(current_block: u64, n: std::num::NonZeroU64) -> RangeInclusive { + current_block.saturating_sub(n.get().saturating_sub(1))..=current_block +} diff --git a/packages/adapters/eth/Cargo.toml b/packages/adapters/eth/Cargo.toml index 6c18d2c9..0ab31d3b 100644 --- a/packages/adapters/eth/Cargo.toml +++ b/packages/adapters/eth/Cargo.toml @@ -21,6 +21,7 @@ alloy = { workspace = true, features = [ "rpc-types", "reqwest-rustls-tls", ] } +static_assertions = { workspace = true } async-trait = { workspace = true } aws-config = { workspace = true, features = ["default"] } aws-sdk-kms = { workspace = true, features = ["default"] } diff --git a/packages/adapters/eth/src/blob_encoder.rs b/packages/adapters/eth/src/blob_encoder.rs new file mode 100644 index 00000000..a6a0ba0f --- /dev/null +++ b/packages/adapters/eth/src/blob_encoder.rs @@ -0,0 +1,136 @@ +use std::num::NonZeroUsize; + +use alloy::{ + 
consensus::BlobTransactionSidecar, + eips::eip4844::{BYTES_PER_BLOB, DATA_GAS_PER_BLOB}, +}; +use fuel_block_committer_encoding::blob; +use itertools::{izip, Itertools}; +use services::{ + types::{Fragment, NonEmpty, NonNegative}, + Result, +}; + +#[derive(Debug, Copy, Clone)] +pub struct BlobEncoder; + +impl BlobEncoder { + #[cfg(feature = "test-helpers")] + pub const FRAGMENT_SIZE: usize = BYTES_PER_BLOB; + + pub(crate) fn sidecar_from_fragments( + fragments: impl IntoIterator, + ) -> crate::error::Result { + let mut sidecar = BlobTransactionSidecar::default(); + + for fragment in fragments { + let data = Vec::from(fragment.data); + + sidecar.blobs.push(Default::default()); + let current_blob = sidecar.blobs.last_mut().expect("just added it"); + + sidecar.commitments.push(Default::default()); + let current_commitment = sidecar.commitments.last_mut().expect("just added it"); + + sidecar.proofs.push(Default::default()); + let current_proof = sidecar.proofs.last_mut().expect("just added it"); + + let read_location = data.as_slice(); + + current_blob.copy_from_slice(&read_location[..BYTES_PER_BLOB]); + let read_location = &read_location[BYTES_PER_BLOB..]; + + current_commitment.copy_from_slice(&read_location[..48]); + let read_location = &read_location[48..]; + + current_proof.copy_from_slice(&read_location[..48]); + } + + Ok(sidecar) + } +} + +impl services::block_bundler::port::l1::FragmentEncoder for BlobEncoder { + fn encode(&self, data: NonEmpty, id: NonNegative) -> Result> { + let data = Vec::from(data); + let encoder = blob::Encoder::default(); + let decoder = blob::Decoder::default(); + + let blobs = encoder.encode(&data, id.as_u32()).map_err(|e| { + crate::error::Error::Other(format!("failed to encode data as blobs: {e}")) + })?; + + let bits_usage: Vec<_> = blobs + .iter() + .map(|blob| { + let blob::Header::V1(header) = decoder.read_header(blob).map_err(|e| { + crate::error::Error::Other(format!("failed to read blob header: {e}")) + })?; + 
Result::Ok(header.num_bits) + }) + .try_collect()?; + + let sidecar = blob::generate_sidecar(blobs) + .map_err(|e| crate::error::Error::Other(format!("failed to generate sidecar: {e}")))?; + + let fragments = izip!( + &sidecar.blobs, + &sidecar.commitments, + &sidecar.proofs, + bits_usage + ) + .map(|(blob, commitment, proof, used_bits)| { + let mut data_commitment_and_proof = vec![0; blob.len() + 48 * 2]; + let write_location = &mut data_commitment_and_proof[..]; + + write_location[..blob.len()].copy_from_slice(blob.as_slice()); + let write_location = &mut write_location[blob.len()..]; + + write_location[..48].copy_from_slice(&(**commitment)); + let write_location = &mut write_location[48..]; + + write_location[..48].copy_from_slice(&(**proof)); + + let bits_per_blob = BYTES_PER_BLOB as u32 * 8; + + Fragment { + data: NonEmpty::from_vec(data_commitment_and_proof).expect("known to be non-empty"), + unused_bytes: bits_per_blob.saturating_sub(used_bits).saturating_div(8), + total_bytes: bits_per_blob + .saturating_div(8) + .try_into() + .expect("known to be non-zero"), + } + }) + .collect(); + + Ok(NonEmpty::from_vec(fragments).expect("known to be non-empty")) + } + + fn gas_usage(&self, num_bytes: NonZeroUsize) -> u64 { + blob::Encoder::default().blobs_needed_to_encode(num_bytes.get()) as u64 * DATA_GAS_PER_BLOB + } +} + +#[cfg(test)] +mod tests { + use alloy::eips::eip4844::DATA_GAS_PER_BLOB; + use fuel_block_committer_encoding::blob; + use services::block_bundler::port::l1::FragmentEncoder; + + use crate::blob_encoder::{self}; + + #[test] + fn gas_usage_correctly_calculated() { + // given + let num_bytes = 400_000; + let encoder = blob::Encoder::default(); + assert_eq!(encoder.blobs_needed_to_encode(num_bytes), 4); + + // when + let gas_usage = blob_encoder::BlobEncoder.gas_usage(num_bytes.try_into().unwrap()); + + // then + assert_eq!(gas_usage, 4 * DATA_GAS_PER_BLOB); + } +} diff --git a/packages/adapters/eth/src/fee_api_helpers.rs 
b/packages/adapters/eth/src/fee_api_helpers.rs new file mode 100644 index 00000000..f764a0c9 --- /dev/null +++ b/packages/adapters/eth/src/fee_api_helpers.rs @@ -0,0 +1,488 @@ +use std::{future::Future, ops::RangeInclusive}; + +use alloy::rpc::types::FeeHistory; +use futures::{stream, StreamExt, TryStreamExt}; +use itertools::{izip, Itertools}; +use services::{ + fees::{Fees, FeesAtHeight, SequentialBlockFees}, + Result, +}; +use static_assertions::const_assert; + +pub async fn batch_requests<'a, 'b, Fut, F>( + height_range: RangeInclusive, + get_fees: F, +) -> Result +where + 'a: 'b, + F: Fn(RangeInclusive, &'a [f64]) -> Fut, + Fut: Future> + 'b, +{ + const REWARD_PERCENTILE: f64 = + alloy::providers::utils::EIP1559_FEE_ESTIMATION_REWARD_PERCENTILE; + // so that a alloy version bump doesn't surprise us + const_assert!(REWARD_PERCENTILE == 20.0,); + + // There is a comment in alloy about not doing more than 1024 blocks at a time + const RPC_LIMIT: u64 = 1024; + + let fees: Vec<_> = stream::iter(chunk_range_inclusive(height_range, RPC_LIMIT)) + .then(|range| get_fees(range, std::slice::from_ref(&REWARD_PERCENTILE))) + .map(|fee_history| fee_history.and_then(unpack_fee_history)) + .map_ok(|block_fees_vec| stream::iter(block_fees_vec.into_iter().map(Result::Ok))) + .try_flatten() + .try_collect() + .await?; + + fees.try_into() + .map_err(|e| services::Error::Other(format!("{e}"))) +} + +fn unpack_fee_history(fees: FeeHistory) -> Result> { + let number_of_blocks = if fees.base_fee_per_gas.is_empty() { + 0 + } else { + // We subtract 1 because the last element is the expected fee for the next block + fees.base_fee_per_gas + .len() + .checked_sub(1) + .expect("checked not 0") + }; + + if number_of_blocks == 0 { + return Ok(vec![]); + } + + let Some(nested_rewards) = fees.reward.as_ref() else { + return Err(services::Error::Other(format!( + "missing rewards field: {fees:?}" + ))); + }; + + if number_of_blocks != nested_rewards.len() + || number_of_blocks != 
fees.base_fee_per_blob_gas.len() - 1 + { + return Err(services::Error::Other(format!( + "discrepancy in lengths of fee fields: {fees:?}" + ))); + } + + let rewards: Vec<_> = nested_rewards + .iter() + .map(|perc| { + perc.last().copied().ok_or_else(|| { + crate::error::Error::Other( + "should have had at least one reward percentile".to_string(), + ) + }) + }) + .try_collect()?; + + let fees = izip!( + (fees.oldest_block..), + fees.base_fee_per_gas.into_iter(), + fees.base_fee_per_blob_gas.into_iter(), + rewards + ) + .take(number_of_blocks) + .map( + |(height, base_fee_per_gas, base_fee_per_blob_gas, reward)| FeesAtHeight { + height, + fees: Fees { + base_fee_per_gas, + reward, + base_fee_per_blob_gas, + }, + }, + ) + .collect(); + + Ok(fees) +} + +pub fn chunk_range_inclusive( + initial_range: RangeInclusive, + chunk_size: u64, +) -> Vec> { + let mut ranges = Vec::new(); + + if chunk_size == 0 { + return ranges; + } + + let start = *initial_range.start(); + let end = *initial_range.end(); + + let mut current = start; + while current <= end { + let chunk_end = (current + chunk_size - 1).min(end); + + ranges.push(current..=chunk_end); + + current = chunk_end + 1; + } + + ranges +} + +#[cfg(test)] +mod tests { + use std::ops::RangeInclusive; + + use alloy::rpc::types::FeeHistory; + use services::fees::{Fees, FeesAtHeight}; + + use crate::fee_api_helpers::{chunk_range_inclusive, unpack_fee_history}; + + #[test] + fn test_chunk_size_zero() { + // given + let initial_range = 1..=10; + let chunk_size = 0; + + // when + let result = chunk_range_inclusive(initial_range, chunk_size); + + // then + let expected: Vec> = vec![]; + assert_eq!( + result, expected, + "Expected empty vector when chunk_size is zero" + ); + } + + #[test] + fn test_chunk_size_larger_than_range() { + // given + let initial_range = 1..=5; + let chunk_size = 10; + + // when + let result = chunk_range_inclusive(initial_range, chunk_size); + + // then + let expected = vec![1..=5]; + assert_eq!( + result, 
expected, + "Expected single chunk when chunk_size exceeds range length" + ); + } + + #[test] + fn test_exact_multiples() { + // given + let initial_range = 1..=10; + let chunk_size = 2; + + // when + let result = chunk_range_inclusive(initial_range, chunk_size); + + // then + let expected = vec![1..=2, 3..=4, 5..=6, 7..=8, 9..=10]; + assert_eq!(result, expected, "Chunks should exactly divide the range"); + } + + #[test] + fn test_non_exact_multiples() { + // given + let initial_range = 1..=10; + let chunk_size = 3; + + // when + let result = chunk_range_inclusive(initial_range, chunk_size); + + // then + let expected = vec![1..=3, 4..=6, 7..=9, 10..=10]; + assert_eq!( + result, expected, + "Last chunk should contain the remaining elements" + ); + } + + #[test] + fn test_single_element_range() { + // given + let initial_range = 5..=5; + let chunk_size = 1; + + // when + let result = chunk_range_inclusive(initial_range, chunk_size); + + // then + let expected = vec![5..=5]; + assert_eq!( + result, expected, + "Single element range should return one chunk with that element" + ); + } + + #[test] + fn test_start_equals_end_with_large_chunk_size() { + // given + let initial_range = 100..=100; + let chunk_size = 50; + + // when + let result = chunk_range_inclusive(initial_range, chunk_size); + + // then + let expected = vec![100..=100]; + assert_eq!( + result, expected, + "Single element range should return one chunk regardless of chunk_size" + ); + } + + #[test] + fn test_chunk_size_one() { + // given + let initial_range = 10..=15; + let chunk_size = 1; + + // when + let result = chunk_range_inclusive(initial_range, chunk_size); + + // then + let expected = vec![10..=10, 11..=11, 12..=12, 13..=13, 14..=14, 15..=15]; + assert_eq!( + result, expected, + "Each number should be its own chunk when chunk_size is one" + ); + } + + #[test] + fn test_unpack_fee_history_empty_base_fee() { + // given + let fees = FeeHistory { + oldest_block: 100, + base_fee_per_gas: vec![], + 
base_fee_per_blob_gas: vec![], + reward: Some(vec![]), + ..Default::default() + }; + + // when + let result = unpack_fee_history(fees); + + // then + let expected: Vec = vec![]; + assert_eq!( + result.unwrap(), + expected, + "Expected empty vector when base_fee_per_gas is empty" + ); + } + + #[test] + fn test_unpack_fee_history_missing_rewards() { + // given + let fees = FeeHistory { + oldest_block: 200, + base_fee_per_gas: vec![100, 200], + base_fee_per_blob_gas: vec![150, 250], + reward: None, + ..Default::default() + }; + + // when + let result = unpack_fee_history(fees.clone()); + + // then + let expected_error = services::Error::Other(format!("missing rewards field: {:?}", fees)); + assert_eq!( + result.unwrap_err(), + expected_error, + "Expected error due to missing rewards field" + ); + } + + #[test] + fn test_unpack_fee_history_discrepancy_in_lengths_base_fee_rewards() { + // given + let fees = FeeHistory { + oldest_block: 300, + base_fee_per_gas: vec![100, 200, 300], + base_fee_per_blob_gas: vec![150, 250, 350], + reward: Some(vec![vec![10]]), // Should have 2 rewards for 2 blocks + ..Default::default() + }; + + // when + let result = unpack_fee_history(fees.clone()); + + // then + let expected_error = + services::Error::Other(format!("discrepancy in lengths of fee fields: {:?}", fees)); + assert_eq!( + result.unwrap_err(), + expected_error, + "Expected error due to discrepancy in lengths of fee fields" + ); + } + + #[test] + fn test_unpack_fee_history_discrepancy_in_lengths_blob_gas() { + // given + let fees = FeeHistory { + oldest_block: 400, + base_fee_per_gas: vec![100, 200, 300], + base_fee_per_blob_gas: vec![150, 250], // Should have 3 elements + reward: Some(vec![vec![10], vec![20]]), + ..Default::default() + }; + + // when + let result = unpack_fee_history(fees.clone()); + + // then + let expected_error = + services::Error::Other(format!("discrepancy in lengths of fee fields: {:?}", fees)); + assert_eq!( + result.unwrap_err(), + expected_error, + 
"Expected error due to discrepancy in base_fee_per_blob_gas lengths" + ); + } + + #[test] + fn test_unpack_fee_history_empty_reward_percentile() { + // given + let fees = FeeHistory { + oldest_block: 500, + base_fee_per_gas: vec![100, 200], + base_fee_per_blob_gas: vec![150, 250], + reward: Some(vec![vec![]]), // Empty percentile + ..Default::default() + }; + + // when + let result = unpack_fee_history(fees.clone()); + + // then + let expected_error = + services::Error::Other("should have had at least one reward percentile".to_string()); + assert_eq!( + result.unwrap_err(), + expected_error, + "Expected error due to empty reward percentile" + ); + } + + #[test] + fn test_unpack_fee_history_single_block() { + // given + let fees = FeeHistory { + oldest_block: 600, + base_fee_per_gas: vec![100, 200], // number_of_blocks =1 + base_fee_per_blob_gas: vec![150, 250], + reward: Some(vec![vec![10]]), + ..Default::default() + }; + + // when + let result = unpack_fee_history(fees); + + // then + let expected = vec![FeesAtHeight { + height: 600, + fees: Fees { + base_fee_per_gas: 100.try_into().unwrap(), + reward: 10.try_into().unwrap(), + base_fee_per_blob_gas: 150.try_into().unwrap(), + }, + }]; + assert_eq!( + result.unwrap(), + expected, + "Expected one BlockFees entry for a single block" + ); + } + + #[test] + fn test_unpack_fee_history_multiple_blocks() { + // given + let fees = FeeHistory { + oldest_block: 700, + base_fee_per_gas: vec![100, 200, 300, 400], // number_of_blocks =3 + base_fee_per_blob_gas: vec![150, 250, 350, 450], + reward: Some(vec![vec![10], vec![20], vec![30]]), + ..Default::default() + }; + + // when + let result = unpack_fee_history(fees); + + // then + let expected = vec![ + FeesAtHeight { + height: 700, + fees: Fees { + base_fee_per_gas: 100.try_into().unwrap(), + reward: 10.try_into().unwrap(), + base_fee_per_blob_gas: 150.try_into().unwrap(), + }, + }, + FeesAtHeight { + height: 701, + fees: Fees { + base_fee_per_gas: 200.try_into().unwrap(), + 
reward: 20.try_into().unwrap(), + base_fee_per_blob_gas: 250.try_into().unwrap(), + }, + }, + FeesAtHeight { + height: 702, + fees: Fees { + base_fee_per_gas: 300.try_into().unwrap(), + reward: 30.try_into().unwrap(), + base_fee_per_blob_gas: 350.try_into().unwrap(), + }, + }, + ]; + assert_eq!( + result.unwrap(), + expected, + "Expected three BlockFees entries for three blocks" + ); + } + + #[test] + fn test_unpack_fee_history_large_values() { + // given + let fees = FeeHistory { + oldest_block: u64::MAX - 2, + base_fee_per_gas: vec![u128::MAX - 2, u128::MAX - 1, u128::MAX], + base_fee_per_blob_gas: vec![u128::MAX - 3, u128::MAX - 2, u128::MAX - 1], + reward: Some(vec![vec![u128::MAX - 4], vec![u128::MAX - 3]]), + ..Default::default() + }; + + // when + let result = unpack_fee_history(fees.clone()); + + // then + let expected = vec![ + FeesAtHeight { + height: u64::MAX - 2, + fees: Fees { + base_fee_per_gas: u128::MAX - 2, + reward: u128::MAX - 4, + base_fee_per_blob_gas: u128::MAX - 3, + }, + }, + FeesAtHeight { + height: u64::MAX - 1, + fees: Fees { + base_fee_per_gas: u128::MAX - 1, + reward: u128::MAX - 3, + base_fee_per_blob_gas: u128::MAX - 2, + }, + }, + ]; + assert_eq!( + result.unwrap(), + expected, + "Expected BlockFees entries with large u64 values" + ); + } +} diff --git a/packages/adapters/eth/src/http.rs b/packages/adapters/eth/src/http.rs new file mode 100644 index 00000000..1c659cda --- /dev/null +++ b/packages/adapters/eth/src/http.rs @@ -0,0 +1,75 @@ +use std::ops::RangeInclusive; + +use alloy::{ + providers::{Provider as AlloyProvider, ProviderBuilder, RootProvider}, + transports::http::{Client, Http}, +}; +use services::{ + fees::SequentialBlockFees, + types::{DateTime, Utc}, +}; +use tracing::info; + +use crate::fee_api_helpers::batch_requests; + +#[derive(Debug, Clone)] +pub struct Provider { + pub(crate) provider: RootProvider>, +} + +impl Provider { + pub fn new(url: &str) -> crate::Result { + let url = url + .parse() + .map_err(|e| 
crate::error::Error::Other(format!("invalid url: {url}: {e}")))?; + let provider = ProviderBuilder::new().on_http(url); + + Ok(Self { provider }) + } +} + +impl Provider { + pub async fn get_block_time(&self, block_num: u64) -> crate::Result>> { + let block = self + .provider + .get_block_by_number(alloy::eips::BlockNumberOrTag::Number(block_num), false) + .await + .map_err(|e| { + crate::error::Error::Other(format!("failed to get block by number: {e}")) + })?; + + let time = block.and_then(|block| { + let timestamp = block.header.timestamp; + DateTime::::from_timestamp(timestamp as i64, 0) + }); + + Ok(time) + } +} +impl services::fees::Api for Provider { + async fn fees(&self, height_range: RangeInclusive) -> crate::Result { + info!("Fetching fees for range: {:?}", height_range); + batch_requests(height_range, |sub_range, percentiles| async move { + let last_block = *sub_range.end(); + let block_count = sub_range.count() as u64; + let fees = self + .provider + .get_fee_history( + block_count, + alloy::eips::BlockNumberOrTag::Number(last_block), + percentiles, + ) + .await + .map_err(|e| services::Error::Network(format!("failed to get fee history: {e}")))?; + + Ok(fees) + }) + .await + } + async fn current_height(&self) -> crate::Result { + self.provider + .get_block_number() + .await + .map_err(|e| services::Error::Network(format!("failed to get block number: {e}"))) + } +} diff --git a/packages/adapters/eth/src/lib.rs b/packages/adapters/eth/src/lib.rs index 38e069fc..b4ef06a1 100644 --- a/packages/adapters/eth/src/lib.rs +++ b/packages/adapters/eth/src/lib.rs @@ -1,210 +1,15 @@ -use std::num::{NonZeroU32, NonZeroUsize}; - -use alloy::{ - consensus::BlobTransactionSidecar, - eips::eip4844::{BYTES_PER_BLOB, DATA_GAS_PER_BLOB}, - primitives::U256, -}; -use delegate::delegate; -use itertools::{izip, Itertools}; -use services::{ - types::{ - BlockSubmissionTx, Fragment, FragmentsSubmitted, L1Height, L1Tx, NonEmpty, NonNegative, - TransactionResponse, - }, - Result, 
-}; +use services::Result; mod aws; +mod blob_encoder; mod error; +mod fee_api_helpers; +mod http; mod metrics; mod websocket; pub use alloy::primitives::Address; pub use aws::*; -use fuel_block_committer_encoding::blob::{self, generate_sidecar}; +pub use blob_encoder::BlobEncoder; +pub use http::Provider as HttpClient; pub use websocket::{L1Key, L1Keys, Signer, Signers, TxConfig, WebsocketClient}; - -#[derive(Debug, Copy, Clone)] -pub struct BlobEncoder; - -impl BlobEncoder { - #[cfg(feature = "test-helpers")] - pub const FRAGMENT_SIZE: usize = BYTES_PER_BLOB; - - pub(crate) fn sidecar_from_fragments( - fragments: impl IntoIterator, - ) -> crate::error::Result { - let mut sidecar = BlobTransactionSidecar::default(); - - for fragment in fragments { - let data = Vec::from(fragment.data); - - sidecar.blobs.push(Default::default()); - let current_blob = sidecar.blobs.last_mut().expect("just added it"); - - sidecar.commitments.push(Default::default()); - let current_commitment = sidecar.commitments.last_mut().expect("just added it"); - - sidecar.proofs.push(Default::default()); - let current_proof = sidecar.proofs.last_mut().expect("just added it"); - - let read_location = data.as_slice(); - - current_blob.copy_from_slice(&read_location[..BYTES_PER_BLOB]); - let read_location = &read_location[BYTES_PER_BLOB..]; - - current_commitment.copy_from_slice(&read_location[..48]); - let read_location = &read_location[48..]; - - current_proof.copy_from_slice(&read_location[..48]); - } - - Ok(sidecar) - } -} - -impl services::block_bundler::port::l1::FragmentEncoder for BlobEncoder { - fn encode(&self, data: NonEmpty, id: NonNegative) -> Result> { - let data = Vec::from(data); - let encoder = blob::Encoder::default(); - let decoder = blob::Decoder::default(); - - let blobs = encoder.encode(&data, id.as_u32()).map_err(|e| { - crate::error::Error::Other(format!("failed to encode data as blobs: {e}")) - })?; - - let bits_usage: Vec<_> = blobs - .iter() - .map(|blob| { - let 
blob::Header::V1(header) = decoder.read_header(blob).map_err(|e| { - crate::error::Error::Other(format!("failed to read blob header: {e}")) - })?; - Result::Ok(header.num_bits) - }) - .try_collect()?; - - let sidecar = generate_sidecar(blobs) - .map_err(|e| crate::error::Error::Other(format!("failed to generate sidecar: {e}")))?; - - let fragments = izip!( - &sidecar.blobs, - &sidecar.commitments, - &sidecar.proofs, - bits_usage - ) - .map(|(blob, commitment, proof, used_bits)| { - let mut data_commitment_and_proof = vec![0; blob.len() + 48 * 2]; - let write_location = &mut data_commitment_and_proof[..]; - - write_location[..blob.len()].copy_from_slice(blob.as_slice()); - let write_location = &mut write_location[blob.len()..]; - - write_location[..48].copy_from_slice(&(**commitment)); - let write_location = &mut write_location[48..]; - - write_location[..48].copy_from_slice(&(**proof)); - - let bits_per_blob = BYTES_PER_BLOB as u32 * 8; - - Fragment { - data: NonEmpty::from_vec(data_commitment_and_proof).expect("known to be non-empty"), - unused_bytes: bits_per_blob.saturating_sub(used_bits).saturating_div(8), - total_bytes: bits_per_blob - .saturating_div(8) - .try_into() - .expect("known to be non-zero"), - } - }) - .collect(); - - Ok(NonEmpty::from_vec(fragments).expect("known to be non-empty")) - } - - fn gas_usage(&self, num_bytes: NonZeroUsize) -> u64 { - blob::Encoder::default().blobs_needed_to_encode(num_bytes.get()) as u64 * DATA_GAS_PER_BLOB - } -} - -impl services::block_committer::port::l1::Contract for WebsocketClient { - delegate! { - to self { - async fn submit(&self, hash: [u8; 32], height: u32) -> Result; - fn commit_interval(&self) -> NonZeroU32; - } - } -} - -impl services::state_listener::port::l1::Api for WebsocketClient { - delegate! 
{ - to (*self) { - async fn get_transaction_response(&self, tx_hash: [u8; 32],) -> Result>; - async fn is_squeezed_out(&self, tx_hash: [u8; 32],) -> Result; - } - } - - async fn get_block_number(&self) -> Result { - let block_num = self._get_block_number().await?; - let height = L1Height::try_from(block_num)?; - - Ok(height) - } -} - -impl services::wallet_balance_tracker::port::l1::Api for WebsocketClient { - delegate! { - to (*self) { - async fn balance(&self, address: Address) -> Result; - } - } -} - -impl services::block_committer::port::l1::Api for WebsocketClient { - delegate! { - to (*self) { - async fn get_transaction_response(&self, tx_hash: [u8; 32],) -> Result>; - } - } - - async fn get_block_number(&self) -> Result { - let block_num = self._get_block_number().await?; - let height = L1Height::try_from(block_num)?; - - Ok(height) - } -} - -impl services::state_committer::port::l1::Api for WebsocketClient { - delegate! { - to (*self) { - async fn submit_state_fragments( - &self, - fragments: NonEmpty, - previous_tx: Option, - ) -> Result<(L1Tx, FragmentsSubmitted)>; - } - } -} - -#[cfg(test)] -mod test { - use alloy::eips::eip4844::DATA_GAS_PER_BLOB; - use fuel_block_committer_encoding::blob; - use services::block_bundler::port::l1::FragmentEncoder; - - use crate::BlobEncoder; - - #[test] - fn gas_usage_correctly_calculated() { - // given - let num_bytes = 400_000; - let encoder = blob::Encoder::default(); - assert_eq!(encoder.blobs_needed_to_encode(num_bytes), 4); - - // when - let gas_usage = BlobEncoder.gas_usage(num_bytes.try_into().unwrap()); - - // then - assert_eq!(gas_usage, 4 * DATA_GAS_PER_BLOB); - } -} diff --git a/packages/adapters/eth/src/websocket.rs b/packages/adapters/eth/src/websocket.rs index 9ccd8091..8fc40e02 100644 --- a/packages/adapters/eth/src/websocket.rs +++ b/packages/adapters/eth/src/websocket.rs @@ -1,16 +1,19 @@ -use std::{num::NonZeroU32, str::FromStr, time::Duration}; +use std::{num::NonZeroU32, ops::RangeInclusive, 
str::FromStr, time::Duration}; use ::metrics::{prometheus::core::Collector, HealthChecker, RegistersMetrics}; use alloy::{ consensus::SignableTransaction, network::TxSigner, primitives::{Address, ChainId, B256}, + rpc::types::FeeHistory, signers::{local::PrivateKeySigner, Signature}, }; +use delegate::delegate; use serde::Deserialize; use services::{ types::{ - BlockSubmissionTx, Fragment, FragmentsSubmitted, L1Tx, NonEmpty, TransactionResponse, U256, + BlockSubmissionTx, Fragment, FragmentsSubmitted, L1Height, L1Tx, NonEmpty, + TransactionResponse, U256, }, Result, }; @@ -20,7 +23,7 @@ use self::{ connection::WsConnection, health_tracking_middleware::{EthApi, HealthTrackingMiddleware}, }; -use crate::{AwsClient, AwsConfig}; +use crate::{fee_api_helpers::batch_requests, AwsClient, AwsConfig}; mod connection; mod health_tracking_middleware; @@ -32,6 +35,86 @@ pub struct WebsocketClient { contract_caller_address: Address, } +impl services::block_committer::port::l1::Contract for WebsocketClient { + delegate! { + to self { + async fn submit(&self, hash: [u8; 32], height: u32) -> Result; + fn commit_interval(&self) -> NonZeroU32; + } + } +} + +impl services::state_listener::port::l1::Api for WebsocketClient { + delegate! { + to (*self) { + async fn get_transaction_response(&self, tx_hash: [u8; 32],) -> Result>; + async fn is_squeezed_out(&self, tx_hash: [u8; 32],) -> Result; + } + } + + async fn get_block_number(&self) -> Result { + let block_num = self._get_block_number().await?; + let height = L1Height::try_from(block_num)?; + + Ok(height) + } +} + +impl services::wallet_balance_tracker::port::l1::Api for WebsocketClient { + delegate! { + to (*self) { + async fn balance(&self, address: Address) -> Result; + } + } +} + +impl services::block_committer::port::l1::Api for WebsocketClient { + delegate! 
{ + to (*self) { + async fn get_transaction_response(&self, tx_hash: [u8; 32],) -> Result>; + } + } + + async fn get_block_number(&self) -> Result { + let block_num = self._get_block_number().await?; + let height = L1Height::try_from(block_num)?; + + Ok(height) + } +} + +impl services::fees::Api for WebsocketClient { + async fn current_height(&self) -> Result { + self._get_block_number().await + } + + async fn fees( + &self, + height_range: RangeInclusive, + ) -> Result { + batch_requests(height_range, move |sub_range, percentiles| async move { + self.fees(sub_range, percentiles).await + }) + .await + } +} + +impl services::state_committer::port::l1::Api for WebsocketClient { + async fn current_height(&self) -> Result { + self._get_block_number().await + } + + delegate! { + to (*self) { + async fn submit_state_fragments( + &self, + fragments: NonEmpty, + previous_tx: Option, + ) -> Result<(L1Tx, FragmentsSubmitted)>; + } + } +} + #[derive(Debug, Clone, PartialEq)] pub enum L1Key { Kms(String), @@ -201,7 +284,6 @@ impl WebsocketClient { }) } - #[must_use] pub fn connection_health_checker(&self) -> HealthChecker { self.inner.connection_health_checker() } @@ -225,6 +307,14 @@ impl WebsocketClient { Ok(self.inner.get_transaction_response(tx_hash).await?) } + pub(crate) async fn fees( + &self, + height_range: RangeInclusive, + rewards_percentile: &[f64], + ) -> Result { + Ok(self.inner.fees(height_range, rewards_percentile).await?) + } + pub(crate) async fn is_squeezed_out(&self, tx_hash: [u8; 32]) -> Result { Ok(self.inner.is_squeezed_out(tx_hash).await?) 
} @@ -274,7 +364,7 @@ impl RegistersMetrics for WebsocketClient { } #[cfg(test)] -mod test { +mod tests { use pretty_assertions::assert_eq; use super::L1Key; diff --git a/packages/adapters/eth/src/websocket/connection.rs b/packages/adapters/eth/src/websocket/connection.rs index 3461b6f6..e1cf1374 100644 --- a/packages/adapters/eth/src/websocket/connection.rs +++ b/packages/adapters/eth/src/websocket/connection.rs @@ -1,6 +1,7 @@ use std::{ cmp::{max, min}, num::NonZeroU32, + ops::RangeInclusive, time::Duration, }; @@ -14,7 +15,7 @@ use alloy::{ primitives::{Address, U256}, providers::{utils::Eip1559Estimation, Provider, ProviderBuilder, SendableTx, WsConnect}, pubsub::PubSubFrontend, - rpc::types::{TransactionReceipt, TransactionRequest}, + rpc::types::{FeeHistory, TransactionReceipt, TransactionRequest}, sol, }; use itertools::Itertools; @@ -30,8 +31,8 @@ use url::Url; use super::{health_tracking_middleware::EthApi, Signers}; use crate::{ + blob_encoder::{self}, error::{Error, Result}, - BlobEncoder, }; pub type WsProvider = alloy::providers::fillers::FillProvider< @@ -221,6 +222,19 @@ impl EthApi for WsConnection { Ok(submission_tx) } + async fn fees( + &self, + height_range: RangeInclusive, + reward_percentiles: &[f64], + ) -> Result { + let max = *height_range.end(); + let count = height_range.count() as u64; + Ok(self + .provider + .get_fee_history(count, BlockNumberOrTag::Number(max), reward_percentiles) + .await?) 
+ } + async fn get_block_number(&self) -> Result { let response = self.provider.get_block_number().await?; Ok(response) @@ -271,7 +285,7 @@ impl EthApi for WsConnection { let num_fragments = min(fragments.len(), 6); let limited_fragments = fragments.into_iter().take(num_fragments); - let sidecar = BlobEncoder::sidecar_from_fragments(limited_fragments)?; + let sidecar = blob_encoder::BlobEncoder::sidecar_from_fragments(limited_fragments)?; let blob_tx = match previous_tx { Some(previous_tx) => { @@ -486,6 +500,7 @@ mod tests { use services::{block_bundler::port::l1::FragmentEncoder, types::nonempty}; use super::*; + use crate::blob_encoder; #[test] fn calculates_correctly_the_commit_height() { @@ -539,8 +554,8 @@ mod tests { }; let data = nonempty![1, 2, 3]; - let fragments = BlobEncoder.encode(data, 1.into()).unwrap(); - let sidecar = BlobEncoder::sidecar_from_fragments(fragments.clone()).unwrap(); + let fragments = blob_encoder::BlobEncoder.encode(data, 1.into()).unwrap(); + let sidecar = blob_encoder::BlobEncoder::sidecar_from_fragments(fragments.clone()).unwrap(); // create a tx with the help of the provider to get gas fields, hash etc let tx = TransactionRequest::default() @@ -617,7 +632,9 @@ mod tests { }; let data = nonempty![1, 2, 3]; - let fragment = BlobEncoder.encode(data, 1.try_into().unwrap()).unwrap(); + let fragment = blob_encoder::BlobEncoder + .encode(data, 1.try_into().unwrap()) + .unwrap(); // when let result = connection.submit_state_fragments(fragment, None).await; diff --git a/packages/adapters/eth/src/websocket/health_tracking_middleware.rs b/packages/adapters/eth/src/websocket/health_tracking_middleware.rs index 21f5c3aa..93045485 100644 --- a/packages/adapters/eth/src/websocket/health_tracking_middleware.rs +++ b/packages/adapters/eth/src/websocket/health_tracking_middleware.rs @@ -1,8 +1,9 @@ -use std::num::NonZeroU32; +use std::{num::NonZeroU32, ops::RangeInclusive}; use ::metrics::{ prometheus::core::Collector, ConnectionHealthTracker, 
HealthChecker, RegistersMetrics, }; +use alloy::rpc::types::FeeHistory; use delegate::delegate; use services::types::{Address, BlockSubmissionTx, Fragment, NonEmpty, TransactionResponse, U256}; @@ -15,6 +16,11 @@ use crate::{ #[async_trait::async_trait] pub trait EthApi { async fn submit(&self, hash: [u8; 32], height: u32) -> Result; + async fn fees( + &self, + height_range: RangeInclusive, + reward_percentiles: &[f64], + ) -> Result; async fn get_block_number(&self) -> Result; async fn balance(&self, address: Address) -> Result; fn commit_interval(&self) -> NonZeroU32; @@ -117,6 +123,16 @@ where response } + async fn fees( + &self, + height_range: RangeInclusive, + reward_percentiles: &[f64], + ) -> Result { + let response = self.adapter.fees(height_range, reward_percentiles).await; + self.note_network_status(&response); + response + } + async fn is_squeezed_out(&self, tx_hash: [u8; 32]) -> Result { let response = self.adapter.is_squeezed_out(tx_hash).await; self.note_network_status(&response); diff --git a/packages/adapters/fuel/src/client.rs b/packages/adapters/fuel/src/client.rs index 8bc10c57..4a8e89bd 100644 --- a/packages/adapters/fuel/src/client.rs +++ b/packages/adapters/fuel/src/client.rs @@ -12,11 +12,13 @@ use futures::{stream, Stream, StreamExt}; use metrics::{ prometheus::core::Collector, ConnectionHealthTracker, HealthChecker, RegistersMetrics, }; -use services::types::{CompressedFuelBlock, NonEmpty}; +use services::{ + types::{CompressedFuelBlock, NonEmpty}, + Error, Result, +}; use url::Url; use crate::metrics::Metrics; -use services::{Error, Result}; #[derive(Clone)] pub struct HttpClient { @@ -27,7 +29,6 @@ pub struct HttpClient { } impl HttpClient { - #[must_use] pub fn new( url: &Url, unhealthy_after_n_errors: usize, @@ -164,7 +165,6 @@ impl HttpClient { } } - #[must_use] pub fn connection_health_checker(&self) -> HealthChecker { self.health_tracker.tracker() } diff --git a/packages/adapters/fuel/src/lib.rs b/packages/adapters/fuel/src/lib.rs 
index 82189de9..2c54a593 100644 --- a/packages/adapters/fuel/src/lib.rs +++ b/packages/adapters/fuel/src/lib.rs @@ -9,7 +9,6 @@ mod metrics; pub use client::*; use delegate::delegate; - use services::Result; impl services::block_importer::port::fuel::Api for client::HttpClient { diff --git a/packages/adapters/storage/src/lib.rs b/packages/adapters/storage/src/lib.rs index 765f0d69..e1986aba 100644 --- a/packages/adapters/storage/src/lib.rs +++ b/packages/adapters/storage/src/lib.rs @@ -178,6 +178,10 @@ impl services::state_committer::port::Storage for Postgres { async fn get_latest_pending_txs(&self) -> Result> { self._get_latest_pending_txs().await.map_err(Into::into) } + + async fn latest_bundled_height(&self) -> Result> { + self._latest_bundled_height().await.map_err(Into::into) + } } impl services::state_pruner::port::Storage for Postgres { diff --git a/packages/adapters/storage/src/mappings/tables.rs b/packages/adapters/storage/src/mappings/tables.rs index d93fbee7..626d431a 100644 --- a/packages/adapters/storage/src/mappings/tables.rs +++ b/packages/adapters/storage/src/mappings/tables.rs @@ -193,6 +193,7 @@ pub struct BundleFragment { pub data: Vec, pub unused_bytes: i64, pub total_bytes: i64, + pub start_height: i64, } impl TryFrom for services::types::storage::BundleFragment { @@ -261,10 +262,18 @@ impl TryFrom for services::types::storage::BundleFragment { total_bytes, }; + let start_height = value.start_height.try_into().map_err(|e| { + crate::error::Error::Conversion(format!( + "Invalid db `start_height` ({}). 
Reason: {e}", + value.start_height + )) + })?; + Ok(Self { id, - idx, bundle_id, + idx, + oldest_block_in_bundle: start_height, fragment, }) } diff --git a/packages/adapters/storage/src/postgres.rs b/packages/adapters/storage/src/postgres.rs index 8d177eda..e3a71513 100644 --- a/packages/adapters/storage/src/postgres.rs +++ b/packages/adapters/storage/src/postgres.rs @@ -279,7 +279,8 @@ impl Postgres { sub.bundle_id, sub.data, sub.unused_bytes, - sub.total_bytes + sub.total_bytes, + sub.start_height FROM ( SELECT DISTINCT ON (f.id) f.*, @@ -325,11 +326,14 @@ impl Postgres { let fragments = sqlx::query_as!( tables::BundleFragment, r#" - SELECT f.* - FROM l1_fragments f - JOIN l1_transaction_fragments tf ON tf.fragment_id = f.id - JOIN l1_blob_transaction t ON t.id = tf.transaction_id - WHERE t.hash = $1 + SELECT + f.*, + b.start_height + FROM l1_fragments f + JOIN l1_transaction_fragments tf ON tf.fragment_id = f.id + JOIN l1_blob_transaction t ON t.id = tf.transaction_id + JOIN bundles b ON b.id = f.bundle_id + WHERE t.hash = $1 "#, tx_hash.as_slice() ) @@ -617,6 +621,19 @@ impl Postgres { .transpose() } + pub(crate) async fn _latest_bundled_height(&self) -> Result> { + sqlx::query!("SELECT MAX(end_height) AS latest_bundled_height FROM bundles") + .fetch_one(&self.connection_pool) + .await? 
+ .latest_bundled_height + .map(|height| { + u32::try_from(height).map_err(|_| { + crate::error::Error::Conversion(format!("invalid block height: {height}")) + }) + }) + .transpose() + } + pub(crate) async fn _update_tx_state( &self, hash: [u8; 32], diff --git a/packages/adapters/storage/src/test_instance.rs b/packages/adapters/storage/src/test_instance.rs index 6d9d70d7..7490772e 100644 --- a/packages/adapters/storage/src/test_instance.rs +++ b/packages/adapters/storage/src/test_instance.rs @@ -339,6 +339,10 @@ impl services::state_committer::port::Storage for DbWithProcess { async fn get_latest_pending_txs(&self) -> services::Result> { self.db._get_latest_pending_txs().await.map_err(Into::into) } + + async fn latest_bundled_height(&self) -> services::Result> { + self.db._latest_bundled_height().await.map_err(Into::into) + } } impl services::status_reporter::port::Storage for DbWithProcess { diff --git a/packages/encoding/src/blob/decoder.rs b/packages/encoding/src/blob/decoder.rs index 0386380f..e372d2fe 100644 --- a/packages/encoding/src/blob/decoder.rs +++ b/packages/encoding/src/blob/decoder.rs @@ -48,7 +48,7 @@ impl Decoder { } #[cfg(test)] -mod test { +mod tests { use bitvec::{order::Msb0, vec::BitVec}; use crate::blob::{self, Blob, Header}; diff --git a/packages/encoding/src/blob/encoder.rs b/packages/encoding/src/blob/encoder.rs index 4211885e..652bd351 100644 --- a/packages/encoding/src/blob/encoder.rs +++ b/packages/encoding/src/blob/encoder.rs @@ -14,14 +14,12 @@ pub struct Encoder { } impl Encoder { - #[must_use] pub fn new() -> Self { Self::default() } } impl Encoder { - #[must_use] pub const fn blobs_needed_to_encode(&self, num_bytes: usize) -> usize { #[allow(clippy::cast_possible_truncation)] const USABLE_BITS_PER_BLOB: usize = USABLE_BITS_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB; @@ -50,7 +48,7 @@ impl Encoder { } #[cfg(test)] -mod test { +mod tests { #[test] fn can_handle_zero_input() { // given diff --git 
a/packages/encoding/src/blob/header.rs b/packages/encoding/src/blob/header.rs index 7c618ebe..1f43185b 100644 --- a/packages/encoding/src/blob/header.rs +++ b/packages/encoding/src/blob/header.rs @@ -120,7 +120,7 @@ impl Header { } #[cfg(test)] -mod test { +mod tests { use bitvec::{field::BitField, order::Msb0}; use super::Header; diff --git a/packages/encoding/src/bundle/decoder.rs b/packages/encoding/src/bundle/decoder.rs index 51692b5e..3e2ddcb0 100644 --- a/packages/encoding/src/bundle/decoder.rs +++ b/packages/encoding/src/bundle/decoder.rs @@ -40,7 +40,7 @@ impl Decoder { } #[cfg(test)] -mod test { +mod tests { use crate::bundle::{Bundle, BundleV1, Encoder}; #[test] diff --git a/packages/encoding/src/bundle/encoder.rs b/packages/encoding/src/bundle/encoder.rs index 241bfd90..b4b45fbf 100644 --- a/packages/encoding/src/bundle/encoder.rs +++ b/packages/encoding/src/bundle/encoder.rs @@ -130,7 +130,6 @@ impl FromStr for CompressionLevel { #[allow(dead_code)] impl CompressionLevel { - #[must_use] pub fn levels() -> Vec { vec![ Self::Disabled, @@ -150,7 +149,7 @@ impl CompressionLevel { } #[cfg(test)] -mod test { +mod tests { use crate::bundle::{CompressionLevel, Encoder}; #[test] diff --git a/packages/encoding/tests/public_api.rs b/packages/encoding/tests/public_api.rs index 392998be..a1e4e9ae 100644 --- a/packages/encoding/tests/public_api.rs +++ b/packages/encoding/tests/public_api.rs @@ -1,5 +1,5 @@ #[cfg(test)] -mod test { +mod tests { use alloy::{consensus::EnvKzgSettings, eips::eip4844::DATA_GAS_PER_BLOB}; use bitvec::{order::Msb0, vec::BitVec}; use fuel_block_committer_encoding::{ diff --git a/packages/metrics/src/connection_health_tracker.rs b/packages/metrics/src/connection_health_tracker.rs index b2913cc5..dc2e3e4d 100644 --- a/packages/metrics/src/connection_health_tracker.rs +++ b/packages/metrics/src/connection_health_tracker.rs @@ -14,7 +14,6 @@ pub struct ConnectionHealthTracker { } impl ConnectionHealthTracker { - #[must_use] pub fn 
new(max_consecutive_failures: usize) -> Self { Self { max_consecutive_failures, @@ -30,7 +29,6 @@ impl ConnectionHealthTracker { self.consecutive_failures.store(0, Ordering::SeqCst); } - #[must_use] pub fn tracker(&self) -> HealthChecker { Box::new(self.clone()) } diff --git a/packages/services/Cargo.toml b/packages/services/Cargo.toml index d23a2313..49e33118 100644 --- a/packages/services/Cargo.toml +++ b/packages/services/Cargo.toml @@ -13,7 +13,6 @@ rust-version = { workspace = true } alloy = { workspace = true } async-trait = { workspace = true } bytesize = { workspace = true } -delegate = { workspace = true } fuel-block-committer-encoding = { workspace = true } fuel-core-client = { workspace = true } fuel-crypto = { workspace = true } @@ -35,7 +34,6 @@ trait-variant = { workspace = true } [dev-dependencies] test-case = { workspace = true } clock = { workspace = true, features = ["test-helpers"] } -delegate = { workspace = true } eth = { workspace = true, features = ["test-helpers"] } fuel-crypto = { workspace = true, features = ["random"] } mockall = { workspace = true } diff --git a/packages/services/src/block_bundler.rs b/packages/services/src/block_bundler.rs index b475f37d..dd9397de 100644 --- a/packages/services/src/block_bundler.rs +++ b/packages/services/src/block_bundler.rs @@ -3,11 +3,6 @@ pub mod bundler; pub mod service { use std::{num::NonZeroUsize, time::Duration}; - use super::bundler::{Bundle, BundleProposal, BundlerFactory, Metadata}; - use crate::{ - types::{DateTime, Utc}, - Error, Result, Runner, - }; use metrics::{ custom_exponential_buckets, prometheus::{histogram_opts, linear_buckets, Histogram, IntGauge}, @@ -15,6 +10,12 @@ pub mod service { }; use tracing::info; + use super::bundler::{Bundle, BundleProposal, BundlerFactory, Metadata}; + use crate::{ + types::{DateTime, Utc}, + Error, Result, Runner, + }; + #[derive(Debug, Clone, Copy)] pub struct Config { pub optimization_time_limit: Duration, @@ -159,7 +160,7 @@ pub mod service { ) 
.await? { - let still_time_to_accumulate_more = self.still_time_to_accumulate_more().await?; + let still_time_to_accumulate_more = self.still_time_to_accumulate_more()?; if blocks.len() < self.config.num_blocks_to_accumulate && still_time_to_accumulate_more { @@ -230,7 +231,7 @@ pub mod service { bundler.finish().await } - async fn still_time_to_accumulate_more(&self) -> Result { + fn still_time_to_accumulate_more(&self) -> Result { let elapsed = self.elapsed(self.last_time_bundled)?; Ok(elapsed < self.config.block_accumulation_time_limit) @@ -281,7 +282,7 @@ pub mod port { #[allow(async_fn_in_trait)] #[trait_variant::make(Send)] #[cfg_attr(feature = "test-helpers", mockall::automock)] - pub trait Api: Send + Sync { + pub trait Api: Sync { async fn latest_height(&self) -> crate::Result; } } @@ -308,7 +309,7 @@ pub mod port { #[allow(async_fn_in_trait)] #[trait_variant::make(Send)] - pub trait Storage: Send + Sync { + pub trait Storage: Sync { async fn lowest_sequence_of_unbundled_blocks( &self, starting_height: u32, @@ -332,13 +333,13 @@ pub mod port { pub mod test_helpers { use std::num::NonZeroUsize; - use crate::types::{storage::SequentialFuelBlocks, NonNegative}; use tokio::sync::{ mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, Mutex, }; use super::bundler::{Bundle, BundleProposal, BundlerFactory}; + use crate::types::{storage::SequentialFuelBlocks, NonNegative}; pub struct ControllableBundler { can_advance: UnboundedReceiver<()>, diff --git a/packages/services/src/block_bundler/bundler.rs b/packages/services/src/block_bundler/bundler.rs index 5a30c84b..f9ebffe1 100644 --- a/packages/services/src/block_bundler/bundler.rs +++ b/packages/services/src/block_bundler/bundler.rs @@ -1,5 +1,10 @@ use std::{cmp::min, collections::VecDeque, fmt::Display, num::NonZeroUsize, ops::RangeInclusive}; +use bytesize::ByteSize; +use fuel_block_committer_encoding::bundle::{self, BundleV1}; +use itertools::Itertools; +use rayon::prelude::*; + use crate::{ types::{ 
storage::SequentialFuelBlocks, CollectNonEmpty, CompressedFuelBlock, Fragment, NonEmpty, @@ -7,12 +12,8 @@ use crate::{ }, Result, }; -use bytesize::ByteSize; -use fuel_block_committer_encoding::bundle::{self, BundleV1}; -use itertools::Itertools; -use rayon::prelude::*; -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct Metadata { pub block_heights: RangeInclusive, pub known_to_be_optimal: bool, @@ -28,6 +29,8 @@ impl Metadata { self.block_heights.clone().count() } + // This is for metrics anyway, precision loss is ok + #[allow(clippy::cast_precision_loss)] pub fn compression_ratio(&self) -> f64 { self.uncompressed_data_size.get() as f64 / self.compressed_data_size.get() as f64 } @@ -55,7 +58,7 @@ impl Display for Metadata { } } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct BundleProposal { pub fragments: NonEmpty, pub metadata: Metadata, diff --git a/packages/services/src/block_committer.rs b/packages/services/src/block_committer.rs index 92c4d19b..c0c3559e 100644 --- a/packages/services/src/block_committer.rs +++ b/packages/services/src/block_committer.rs @@ -1,13 +1,12 @@ pub mod service { use std::num::NonZeroU32; + use tracing::info; + use crate::{ types::{fuel::FuelBlock, BlockSubmission, NonNegative, TransactionState}, - Error, Result, + Error, Result, Runner, }; - use tracing::info; - - use crate::Runner; pub struct BlockCommitter { l1_adapter: L1, @@ -222,7 +221,7 @@ pub mod port { #[allow(async_fn_in_trait)] #[trait_variant::make(Send)] #[cfg_attr(feature = "test-helpers", mockall::automock)] - pub trait Contract: Send + Sync { + pub trait Contract: Sync { async fn submit(&self, hash: [u8; 32], height: u32) -> Result; fn commit_interval(&self) -> NonZeroU32; } @@ -240,13 +239,14 @@ pub mod port { } pub mod fuel { - use crate::Result; pub use fuel_core_client::client::types::block::Block as FuelBlock; + use crate::Result; + #[allow(async_fn_in_trait)] #[trait_variant::make(Send)] 
#[cfg_attr(feature = "test-helpers", mockall::automock)] - pub trait Api: Send + Sync { + pub trait Api: Sync { async fn latest_block(&self) -> Result; async fn block_at_height(&self, height: u32) -> Result>; } @@ -254,7 +254,7 @@ pub mod port { #[allow(async_fn_in_trait)] #[trait_variant::make(Send)] - pub trait Storage: Send + Sync { + pub trait Storage: Sync { async fn record_block_submission( &self, submission_tx: BlockSubmissionTx, diff --git a/packages/services/src/block_importer.rs b/packages/services/src/block_importer.rs index a95fb2d1..ef9a7e79 100644 --- a/packages/services/src/block_importer.rs +++ b/packages/services/src/block_importer.rs @@ -1,10 +1,11 @@ pub mod service { + use futures::TryStreamExt; + use tracing::info; + use crate::{ types::{nonempty, CompressedFuelBlock, NonEmpty}, Result, Runner, }; - use futures::TryStreamExt; - use tracing::info; /// The `BlockImporter` is responsible for importing blocks from the Fuel blockchain /// into local storage. It fetches blocks from the Fuel API @@ -82,7 +83,7 @@ pub mod port { #[allow(async_fn_in_trait)] #[trait_variant::make(Send)] - pub trait Storage: Send + Sync { + pub trait Storage: Sync { async fn insert_blocks(&self, block: NonEmpty) -> Result<()>; async fn missing_blocks( &self, @@ -101,7 +102,7 @@ pub mod port { #[allow(async_fn_in_trait)] #[trait_variant::make(Send)] #[cfg_attr(feature = "test-helpers", mockall::automock)] - pub trait Api: Send + Sync { + pub trait Api: Sync { fn compressed_blocks_in_height_range( &self, range: RangeInclusive, diff --git a/packages/services/src/fee_metrics_tracker.rs b/packages/services/src/fee_metrics_tracker.rs new file mode 100644 index 00000000..831be648 --- /dev/null +++ b/packages/services/src/fee_metrics_tracker.rs @@ -0,0 +1,100 @@ +pub mod service { + use std::{num::NonZeroU64, ops::RangeInclusive}; + + use metrics::{ + prometheus::{core::Collector, IntGauge, Opts}, + RegistersMetrics, + }; + + use crate::{ + fees::{Api, Fees}, + Result, Runner, + 
}; + + #[derive(Debug, Clone)] + struct FeeMetrics { + current: IntGauge, + } + + impl Default for FeeMetrics { + fn default() -> Self { + let current = IntGauge::with_opts(Opts::new( + "current_blob_tx_fee", + "The current fee for a transaction with 6 blobs", + )) + .expect("metric config to be correct"); + + Self { current } + } + } + + impl

RegistersMetrics for FeeMetricsTracker

{ + fn metrics(&self) -> Vec> { + vec![Box::new(self.metrics.current.clone())] + } + } + + #[derive(Clone)] + pub struct FeeMetricsTracker

{ + fee_provider: P, + metrics: FeeMetrics, + } + + pub fn calculate_blob_tx_fee(num_blobs: u32, fees: &Fees) -> u128 { + const DATA_GAS_PER_BLOB: u128 = 131_072u128; + const INTRINSIC_GAS: u128 = 21_000u128; + + let base_fee = INTRINSIC_GAS.saturating_mul(fees.base_fee_per_gas); + let blob_fee = fees + .base_fee_per_blob_gas + .saturating_mul(u128::from(num_blobs)) + .saturating_mul(DATA_GAS_PER_BLOB); + let reward_fee = fees.reward.saturating_mul(INTRINSIC_GAS); + + base_fee.saturating_add(blob_fee).saturating_add(reward_fee) + } + + const fn last_n_blocks(current_block: u64, n: NonZeroU64) -> RangeInclusive { + current_block.saturating_sub(n.get().saturating_sub(1))..=current_block + } + + impl

FeeMetricsTracker

{ + pub fn new(fee_provider: P) -> Self { + Self { + fee_provider, + metrics: FeeMetrics::default(), + } + } + } + + impl FeeMetricsTracker

{ + pub async fn update_metrics(&self) -> Result<()> { + let current_block = self.fee_provider.current_height().await?; + let tx_fees_for_last_n_blocks = |n| async move { + let fees = self + .fee_provider + .fees(last_n_blocks(current_block, n)) + .await? + .mean(); + + Result::Ok(i64::try_from(calculate_blob_tx_fee(6, &fees)).unwrap_or(i64::MAX)) + }; + + let current = tx_fees_for_last_n_blocks(1.try_into().expect("not zero")).await?; + + self.metrics.current.set(current); + + Ok(()) + } + } + + impl

Runner for FeeMetricsTracker

+ where + P: Api + Send + Sync, + { + async fn run(&mut self) -> Result<()> { + self.update_metrics().await?; + Ok(()) + } + } +} diff --git a/packages/services/src/fees.rs b/packages/services/src/fees.rs new file mode 100644 index 00000000..58857316 --- /dev/null +++ b/packages/services/src/fees.rs @@ -0,0 +1,149 @@ +pub mod cache; +#[cfg(feature = "test-helpers")] +pub mod testing; + +use std::ops::RangeInclusive; + +use itertools::Itertools; +use serde::{Deserialize, Serialize}; + +// The cleanest solution would probably be for the state committer and the fee metrics tracker to +// have their own fee api in their respective ports, but having this common deduplicates a lot of +// code. +#[allow(async_fn_in_trait)] +#[trait_variant::make(Send)] +#[cfg_attr(feature = "test-helpers", mockall::automock)] +pub trait Api { + async fn fees(&self, height_range: RangeInclusive) -> crate::Result; + async fn current_height(&self) -> crate::Result; +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub struct Fees { + pub base_fee_per_gas: u128, + pub reward: u128, + pub base_fee_per_blob_gas: u128, +} + +impl Default for Fees { + fn default() -> Self { + Self { + base_fee_per_gas: 1.try_into().unwrap(), + reward: 1.try_into().unwrap(), + base_fee_per_blob_gas: 1.try_into().unwrap(), + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub struct FeesAtHeight { + pub height: u64, + pub fees: Fees, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct SequentialBlockFees { + fees: Vec, +} + +// Doesn't detect that we use the contents in the Display impl +#[allow(dead_code)] +#[derive(Debug)] +pub struct InvalidSequence(String); + +impl std::error::Error for InvalidSequence {} + +impl std::fmt::Display for InvalidSequence { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{self:?}") + } +} + +impl IntoIterator for SequentialBlockFees { + type Item = FeesAtHeight; + type IntoIter = 
std::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.fees.into_iter() + } +} + +impl FromIterator for Result { + fn from_iter>(iter: T) -> Self { + SequentialBlockFees::try_from(iter.into_iter().collect::>()) + } +} + +// Cannot be empty +#[allow(clippy::len_without_is_empty)] +impl SequentialBlockFees { + pub fn iter(&self) -> impl Iterator { + self.fees.iter() + } + + pub fn last(&self) -> &FeesAtHeight { + self.fees.last().expect("not empty") + } + + pub fn mean(&self) -> Fees { + let count = self.len() as u128; + + let total = self + .fees + .iter() + .map(|bf| bf.fees) + .fold(Fees::default(), |acc, f| { + let base_fee_per_gas = acc.base_fee_per_gas.saturating_add(f.base_fee_per_gas); + let reward = acc.reward.saturating_add(f.reward); + let base_fee_per_blob_gas = acc + .base_fee_per_blob_gas + .saturating_add(f.base_fee_per_blob_gas); + + Fees { + base_fee_per_gas, + reward, + base_fee_per_blob_gas, + } + }); + + Fees { + base_fee_per_gas: total.base_fee_per_gas.saturating_div(count), + reward: total.reward.saturating_div(count), + base_fee_per_blob_gas: total.base_fee_per_blob_gas.saturating_div(count), + } + } + + pub fn len(&self) -> usize { + self.fees.len() + } + + pub fn height_range(&self) -> RangeInclusive { + let start = self.fees.first().expect("not empty").height; + let end = self.fees.last().expect("not empty").height; + start..=end + } +} + +impl TryFrom> for SequentialBlockFees { + type Error = InvalidSequence; + fn try_from(mut fees: Vec) -> Result { + if fees.is_empty() { + return Err(InvalidSequence("Input cannot be empty".to_string())); + } + + fees.sort_by_key(|f| f.height); + + let is_sequential = fees + .iter() + .tuple_windows() + .all(|(l, r)| l.height + 1 == r.height); + + let heights = fees.iter().map(|f| f.height).collect::>(); + if !is_sequential { + return Err(InvalidSequence(format!( + "blocks are not sequential by height: {heights:?}" + ))); + } + + Ok(Self { fees }) + } +} diff --git 
a/packages/services/src/fees/cache.rs b/packages/services/src/fees/cache.rs new file mode 100644 index 00000000..9ba25d7a --- /dev/null +++ b/packages/services/src/fees/cache.rs @@ -0,0 +1,356 @@ +use std::{collections::BTreeMap, ops::RangeInclusive, sync::Arc}; + +use tokio::sync::Mutex; + +use super::{Api, Fees, FeesAtHeight, SequentialBlockFees}; +use crate::{Error, Result}; + +#[derive(Debug, Clone)] +pub struct CachingApi

{ + fees_provider: P, + // preferred over RwLock because of simplicity + cache: Arc>>, + cache_limit: usize, +} + +impl

CachingApi

{ + pub const fn inner(&self) -> &P { + &self.fees_provider + } + + pub fn new(fees_provider: P, cache_limit: usize) -> Self { + Self { + fees_provider, + cache: Arc::new(Mutex::new(BTreeMap::new())), + cache_limit, + } + } + + pub async fn import(&self, fees: impl IntoIterator) { + self.cache.lock().await.extend(fees); + } + + pub async fn export(&self) -> impl IntoIterator { + self.cache.lock().await.clone() + } +} + +impl Api for CachingApi

{ + async fn fees(&self, height_range: RangeInclusive) -> Result { + self.get_fees(height_range).await + } + + async fn current_height(&self) -> Result { + self.fees_provider.current_height().await + } +} + +impl CachingApi

{ + async fn download_missing_fees( + &self, + available_fees: &[FeesAtHeight], + height_range: RangeInclusive, + ) -> Result> { + let mut fees = vec![]; + for range in detect_missing_ranges(available_fees, height_range) { + let new_fees = self.fees_provider.fees(range.clone()).await?; + fees.extend(new_fees); + } + + Ok(fees) + } + + pub async fn get_fees(&self, height_range: RangeInclusive) -> Result { + let mut cache_guard = self.cache.lock().await; + let mut fees = Self::read_cached_fees(&cache_guard, height_range.clone()); + let missing_fees = self.download_missing_fees(&fees, height_range).await?; + + self.update_cache(&mut cache_guard, missing_fees.clone()); + drop(cache_guard); + + fees.extend(missing_fees); + fees.sort_by_key(|f| f.height); + + SequentialBlockFees::try_from(fees).map_err(|e| Error::Other(e.to_string())) + } + + fn read_cached_fees( + cache: &BTreeMap, + height_range: RangeInclusive, + ) -> Vec { + cache + .range(height_range) + .map(|(height, fees)| FeesAtHeight { + height: *height, + fees: *fees, + }) + .collect() + } + + fn update_cache(&self, cache: &mut BTreeMap, fees: Vec) { + cache.extend(fees.into_iter().map(|bf| (bf.height, bf.fees))); + + while cache.len() > self.cache_limit { + cache.pop_first(); + } + } +} + +fn detect_missing_ranges( + available_fees: &[FeesAtHeight], + height_range: RangeInclusive, +) -> Vec> { + if available_fees.is_empty() { + return vec![height_range]; + } + + let (last_height, mut missing_ranges) = available_fees.iter().map(|bf| bf.height).fold( + (None, Vec::new()), + |(prev, mut acc), current| { + match prev { + Some(prev_h) => { + if current > prev_h + 1 { + // Found a gap between prev_h and current + acc.push((prev_h + 1)..=current.saturating_sub(1)); + } + } + None => { + if current > *height_range.start() { + // Missing range before the first available height + acc.push(*height_range.start()..=current.saturating_sub(1)); + } + } + } + (Some(current), acc) + }, + ); + + // Check for a missing range 
after the last available height + if let Some(last_h) = last_height { + if last_h < *height_range.end() { + missing_ranges.push((last_h + 1)..=*height_range.end()); + } + } + + missing_ranges +} + +#[cfg(test)] +mod tests { + use std::ops::RangeInclusive; + + use mockall::predicate::eq; + + use crate::fees::{cache::CachingApi, Fees, FeesAtHeight, MockApi, SequentialBlockFees}; + + #[tokio::test] + async fn evicts_oldest_blocks() { + // given + let mut mock_provider = MockApi::new(); + + mock_provider + .expect_fees() + .with(eq(0..=4)) + .times(2) + .returning(|range| Box::pin(async { Ok(generate_sequential_fees(range)) })); + + mock_provider + .expect_fees() + .with(eq(5..=9)) + .times(1) + .returning(|range| Box::pin(async { Ok(generate_sequential_fees(range)) })); + + let provider = CachingApi::new(mock_provider, 5); + let _ = provider.get_fees(0..=4).await.unwrap(); + let _ = provider.get_fees(5..=9).await.unwrap(); + + // when + let _ = provider.get_fees(0..=4).await.unwrap(); + + // then + // will refetch 0..=4 due to eviction + } + + #[tokio::test] + async fn handles_request_larger_than_cache() { + use mockall::predicate::*; + + // given + let mut mock_provider = MockApi::new(); + + let cache_limit = 5; + + mock_provider + .expect_fees() + .with(eq(0..=9)) + .times(1) + .returning(|range| Box::pin(async move { Ok(generate_sequential_fees(range)) })); + + let provider = CachingApi::new(mock_provider, cache_limit); + + // when + let result = provider.get_fees(0..=9).await.unwrap(); + + // then + assert_eq!(result, generate_sequential_fees(0..=9)); + } + + #[tokio::test] + async fn import_and_export() { + // given + let mock_provider = MockApi::new(); + + let cache_limit = 10; + + let provider = CachingApi::new(mock_provider, cache_limit); + + let fees_to_import = (0..5) + .map(|h| { + let fee = u128::from(h) + 1; + ( + h, + Fees { + base_fee_per_gas: fee, + reward: fee, + base_fee_per_blob_gas: fee, + }, + ) + }) + .collect::>(); + + // when + 
provider.import(fees_to_import.clone()).await; + let exported = provider.export().await.into_iter().collect::>(); + + // then + assert_eq!(exported, fees_to_import); + } + + #[tokio::test] + async fn handles_single_element_range() { + // given + let mut mock_provider = MockApi::new(); + + mock_provider + .expect_fees() + .with(eq(3..=3)) + .once() + .returning(|range| Box::pin(async move { Ok(generate_sequential_fees(range)) })); + + mock_provider + .expect_current_height() + .returning(|| Box::pin(async { Ok(10) })); + + let provider = CachingApi::new(mock_provider, 10); + + // when + let result = provider.get_fees(3..=3).await.unwrap(); + + // then + let expected = generate_sequential_fees(3..=3); + assert_eq!(result, expected); + } + + #[tokio::test] + async fn handles_overlapping_ranges() { + // given + let mut mock_provider = MockApi::new(); + + let mut sequence = mockall::Sequence::new(); + + // First fetch 0..=4 + mock_provider + .expect_fees() + .with(eq(0..=4)) + .once() + .in_sequence(&mut sequence) + .returning(|range| Box::pin(async move { Ok(generate_sequential_fees(range)) })); + + // Then fetch 3..=7 (only 5..=7 should be fetched) + mock_provider + .expect_fees() + .with(eq(5..=7)) + .once() + .in_sequence(&mut sequence) + .returning(|range| Box::pin(async move { Ok(generate_sequential_fees(range)) })); + + mock_provider + .expect_current_height() + .returning(|| Box::pin(async { Ok(10) })); + + let provider = CachingApi::new(mock_provider, 10); + + // when + let first_call = provider.get_fees(0..=4).await.unwrap(); + let second_call = provider.get_fees(0..=7).await.unwrap(); + + // then + let expected_first = generate_sequential_fees(0..=4); + let expected_second = generate_sequential_fees(0..=7); + assert_eq!(first_call, expected_first); + assert_eq!(second_call, expected_second); + } + + #[tokio::test] + async fn updates_cache_correctly() { + // given + let mut mock_provider = MockApi::new(); + + mock_provider + .expect_fees() + .with(eq(0..=4)) + 
.once() + .returning(|range| Box::pin(async move { Ok(generate_sequential_fees(range)) })); + + mock_provider + .expect_fees() + .with(eq(5..=5)) + .once() + .returning(|range| Box::pin(async move { Ok(generate_sequential_fees(range)) })); + + mock_provider + .expect_current_height() + .returning(|| Box::pin(async { Ok(10) })); + + let provider = CachingApi::new(mock_provider, 6); + + // when + let first_call = provider.get_fees(0..=4).await.unwrap(); + let second_call = provider.get_fees(0..=5).await.unwrap(); + + let exported = provider.export().await.into_iter().collect::>(); + + // then + let expected_first = generate_sequential_fees(0..=4); + let expected_second = generate_sequential_fees(0..=5); + assert_eq!(first_call, expected_first); + assert_eq!(second_call, expected_second); + assert_eq!(exported.len(), 6); + assert_eq!( + exported, + generate_sequential_fees(0..=5) + .into_iter() + .map(|bf| (bf.height, bf.fees)) + .collect::>() + ); + } + + fn generate_sequential_fees(height_range: RangeInclusive) -> SequentialBlockFees { + SequentialBlockFees::try_from( + height_range + .map(|h| { + let fee = u128::from(h + 1); + FeesAtHeight { + height: h, + fees: Fees { + base_fee_per_gas: fee, + reward: fee, + base_fee_per_blob_gas: fee, + }, + } + }) + .collect::>(), + ) + .unwrap() + } +} diff --git a/packages/services/src/fees/testing.rs b/packages/services/src/fees/testing.rs new file mode 100644 index 00000000..9e530d3f --- /dev/null +++ b/packages/services/src/fees/testing.rs @@ -0,0 +1,72 @@ +use std::{collections::BTreeMap, ops::RangeInclusive}; + +use itertools::Itertools; + +use super::{Api, Fees, FeesAtHeight, SequentialBlockFees}; + +#[derive(Debug, Clone, Copy)] +pub struct ConstantFeeApi { + fees: Fees, +} + +impl ConstantFeeApi { + pub const fn new(fees: Fees) -> Self { + Self { fees } + } +} + +impl Api for ConstantFeeApi { + async fn fees(&self, height_range: RangeInclusive) -> crate::Result { + let fees = height_range + .into_iter() + 
.map(|height| FeesAtHeight { + height, + fees: self.fees, + }) + .collect_vec(); + + Ok(fees.try_into().unwrap()) + } + + async fn current_height(&self) -> crate::Result { + Ok(0) + } +} + +#[derive(Debug, Clone)] +pub struct PreconfiguredFeeApi { + fees: BTreeMap, +} + +impl Api for PreconfiguredFeeApi { + async fn current_height(&self) -> crate::Result { + Ok(*self + .fees + .keys() + .last() + .expect("no fees registered with PreconfiguredFeesProvider")) + } + + async fn fees(&self, height_range: RangeInclusive) -> crate::Result { + let fees = self + .fees + .iter() + .skip_while(|(height, _)| !height_range.contains(height)) + .take_while(|(height, _)| height_range.contains(height)) + .map(|(height, fees)| FeesAtHeight { + height: *height, + fees: *fees, + }) + .collect_vec(); + + Ok(fees.try_into().expect("block fees not sequential")) + } +} + +impl PreconfiguredFeeApi { + pub fn new(blocks: impl IntoIterator) -> Self { + Self { + fees: blocks.into_iter().collect(), + } + } +} diff --git a/packages/services/src/health_reporter.rs b/packages/services/src/health_reporter.rs index b5e9c8f3..6cee715a 100644 --- a/packages/services/src/health_reporter.rs +++ b/packages/services/src/health_reporter.rs @@ -20,7 +20,6 @@ pub mod service { } impl HealthReporter { - #[must_use] pub fn new(fuel_health_check: HealthChecker, eth_health_check: HealthChecker) -> Self { Self { fuel_connection: fuel_health_check, @@ -28,7 +27,6 @@ pub mod service { } } - #[must_use] pub fn report(&self) -> HealthReport { HealthReport { fuel_connection_up: self.fuel_connection.healthy(), diff --git a/packages/services/src/lib.rs b/packages/services/src/lib.rs index 0e1391a1..0ee52080 100644 --- a/packages/services/src/lib.rs +++ b/packages/services/src/lib.rs @@ -2,6 +2,7 @@ pub mod block_bundler; pub mod block_committer; pub mod block_importer; pub mod cost_reporter; +pub mod fee_metrics_tracker; pub mod health_reporter; pub mod state_committer; pub mod state_listener; @@ -10,9 +11,11 @@ pub 
mod status_reporter; pub mod types; pub mod wallet_balance_tracker; +pub mod fees; + pub use block_bundler::{ - bundler::Factory as BundlerFactory, service::BlockBundler, - service::Config as BlockBundlerConfig, + bundler::Factory as BundlerFactory, + service::{BlockBundler, Config as BlockBundlerConfig}, }; #[cfg(feature = "test-helpers")] pub use block_bundler::{ @@ -22,7 +25,7 @@ pub use block_bundler::{ pub use state_committer::service::{Config as StateCommitterConfig, StateCommitter}; use types::InvalidL1Height; -#[derive(thiserror::Error, Debug)] +#[derive(thiserror::Error, Debug, PartialEq, Eq)] pub enum Error { #[error("{0}")] Other(String), @@ -49,6 +52,6 @@ impl From for Error { pub type Result = std::result::Result; #[trait_variant::make(Send)] -pub trait Runner: Send + Sync { +pub trait Runner: Sync { async fn run(&mut self) -> Result<()>; } diff --git a/packages/services/src/state_committer.rs b/packages/services/src/state_committer.rs index ca2dcf3f..05bece96 100644 --- a/packages/services/src/state_committer.rs +++ b/packages/services/src/state_committer.rs @@ -1,332 +1,6 @@ -pub mod service { - use std::{num::NonZeroUsize, time::Duration}; - - use crate::{ - types::{storage::BundleFragment, CollectNonEmpty, DateTime, L1Tx, NonEmpty, Utc}, - Result, Runner, - }; - use itertools::Itertools; - use tracing::info; - - // src/config.rs - #[derive(Debug, Clone)] - pub struct Config { - /// The lookback window in blocks to determine the starting height. 
- pub lookback_window: u32, - pub fragment_accumulation_timeout: Duration, - pub fragments_to_accumulate: NonZeroUsize, - pub gas_bump_timeout: Duration, - pub tx_max_fee: u128, - } - - #[cfg(feature = "test-helpers")] - impl Default for Config { - fn default() -> Self { - Self { - lookback_window: 1000, - fragment_accumulation_timeout: Duration::from_secs(0), - fragments_to_accumulate: 1.try_into().unwrap(), - gas_bump_timeout: Duration::from_secs(300), - tx_max_fee: 1_000_000_000, - } - } - } - - /// The `StateCommitter` is responsible for committing state fragments to L1. - pub struct StateCommitter { - l1_adapter: L1, - fuel_api: FuelApi, - storage: Db, - config: Config, - clock: Clock, - startup_time: DateTime, - } - - impl StateCommitter - where - Clock: crate::state_committer::port::Clock, - { - /// Creates a new `StateCommitter`. - pub fn new( - l1_adapter: L1, - fuel_api: FuelApi, - storage: Db, - config: Config, - clock: Clock, - ) -> Self { - let startup_time = clock.now(); - Self { - l1_adapter, - fuel_api, - storage, - config, - clock, - startup_time, - } - } - } - - impl StateCommitter - where - L1: crate::state_committer::port::l1::Api, - FuelApi: crate::state_committer::port::fuel::Api, - Db: crate::state_committer::port::Storage, - Clock: crate::state_committer::port::Clock, - { - async fn get_reference_time(&self) -> Result> { - Ok(self - .storage - .last_time_a_fragment_was_finalized() - .await? 
- .unwrap_or(self.startup_time)) - } - - async fn is_timeout_expired(&self) -> Result { - let reference_time = self.get_reference_time().await?; - let elapsed = self.clock.now() - reference_time; - let std_elapsed = elapsed - .to_std() - .map_err(|e| crate::Error::Other(format!("Failed to convert time: {}", e)))?; - Ok(std_elapsed >= self.config.fragment_accumulation_timeout) - } - - async fn submit_fragments( - &self, - fragments: NonEmpty, - previous_tx: Option, - ) -> Result<()> { - info!("about to send at most {} fragments", fragments.len()); - - let data = fragments.clone().map(|f| f.fragment); - - match self - .l1_adapter - .submit_state_fragments(data, previous_tx) - .await - { - Ok((submitted_tx, submitted_fragments)) => { - let fragment_ids = fragments - .iter() - .map(|f| f.id) - .take(submitted_fragments.num_fragments.get()) - .collect_nonempty() - .expect("non-empty vec"); - - let ids = fragment_ids - .iter() - .map(|id| id.as_u32().to_string()) - .join(", "); - - let tx_hash = submitted_tx.hash; - self.storage - .record_pending_tx(submitted_tx, fragment_ids, self.clock.now()) - .await?; - - tracing::info!("Submitted fragments {ids} with tx {}", hex::encode(tx_hash)); - Ok(()) - } - Err(e) => { - let ids = fragments - .iter() - .map(|f| f.id.as_u32().to_string()) - .join(", "); - - tracing::error!("Failed to submit fragments {ids}: {e}"); - - Err(e) - } - } - } - - async fn latest_pending_transaction(&self) -> Result> { - let tx = self.storage.get_latest_pending_txs().await?; - Ok(tx) - } - - async fn next_fragments_to_submit(&self) -> Result>> { - let latest_height = self.fuel_api.latest_height().await?; - let starting_height = latest_height.saturating_sub(self.config.lookback_window); - - // although we shouldn't know at this layer how many fragments the L1 can accept, we ignore - // this for now and put the eth value of max blobs per block (6). 
- let existing_fragments = self - .storage - .oldest_nonfinalized_fragments(starting_height, 6) - .await?; - - Ok(NonEmpty::collect(existing_fragments)) - } - - async fn should_submit_fragments(&self, fragment_count: NonZeroUsize) -> Result { - if fragment_count >= self.config.fragments_to_accumulate { - return Ok(true); - } - info!( - "have only {} out of the target {} fragments per tx", - fragment_count, self.config.fragments_to_accumulate - ); - - let expired = self.is_timeout_expired().await?; - if expired { - info!( - "fragment accumulation timeout expired, proceeding with {} fragments", - fragment_count - ); - } - - Ok(expired) - } - - async fn submit_fragments_if_ready(&self) -> Result<()> { - if let Some(fragments) = self.next_fragments_to_submit().await? { - if self - .should_submit_fragments(fragments.len_nonzero()) - .await? - { - self.submit_fragments(fragments, None).await?; - } - } - Ok(()) - } - - fn elapsed_since_tx_submitted(&self, tx: &L1Tx) -> Result { - let created_at = tx.created_at.expect("tx to have timestamp"); - - self.clock.elapsed(created_at) - } - - async fn fragments_submitted_by_tx( - &self, - tx_hash: [u8; 32], - ) -> Result> { - let fragments = self.storage.fragments_submitted_by_tx(tx_hash).await?; - - match NonEmpty::collect(fragments) { - Some(fragments) => Ok(fragments), - None => Err(crate::Error::Other(format!( - "no fragments found for previously submitted tx {}", - hex::encode(tx_hash) - ))), - } - } - - async fn resubmit_fragments_if_stalled(&self) -> Result<()> { - let Some(previous_tx) = self.latest_pending_transaction().await? 
else { - return Ok(()); - }; - - let elapsed = self.elapsed_since_tx_submitted(&previous_tx)?; - - if elapsed >= self.config.gas_bump_timeout { - info!( - "replacing tx {} because it was pending for {}s", - hex::encode(previous_tx.hash), - elapsed.as_secs() - ); - - let fragments = self.fragments_submitted_by_tx(previous_tx.hash).await?; - self.submit_fragments(fragments, Some(previous_tx)).await?; - } - - Ok(()) - } - } - - impl Runner for StateCommitter - where - L1: crate::state_committer::port::l1::Api + Send + Sync, - FuelApi: crate::state_committer::port::fuel::Api + Send + Sync, - Db: crate::state_committer::port::Storage + Clone + Send + Sync, - Clock: crate::state_committer::port::Clock + Send + Sync, - { - async fn run(&mut self) -> Result<()> { - if self.storage.has_nonfinalized_txs().await? { - self.resubmit_fragments_if_stalled().await? - } else { - self.submit_fragments_if_ready().await? - }; - - Ok(()) - } - } -} - -pub mod port { - use nonempty::NonEmpty; - - use crate::{ - types::{storage::BundleFragment, DateTime, L1Tx, NonNegative, Utc}, - Error, Result, - }; - - pub mod l1 { - use nonempty::NonEmpty; - - use crate::{ - types::{BlockSubmissionTx, Fragment, FragmentsSubmitted, L1Tx}, - Result, - }; - - #[allow(async_fn_in_trait)] - #[trait_variant::make(Send)] - #[cfg_attr(feature = "test-helpers", mockall::automock)] - pub trait Contract: Send + Sync { - async fn submit(&self, hash: [u8; 32], height: u32) -> Result; - } - - #[allow(async_fn_in_trait)] - #[trait_variant::make(Send)] - #[cfg_attr(feature = "test-helpers", mockall::automock)] - pub trait Api { - async fn submit_state_fragments( - &self, - fragments: NonEmpty, - previous_tx: Option, - ) -> Result<(L1Tx, FragmentsSubmitted)>; - } - } - - pub mod fuel { - use crate::Result; - pub use fuel_core_client::client::types::block::Block as FuelBlock; - - #[allow(async_fn_in_trait)] - #[trait_variant::make(Send)] - #[cfg_attr(feature = "test-helpers", mockall::automock)] - pub trait Api: Send + 
Sync { - async fn latest_height(&self) -> Result; - } - } - - #[allow(async_fn_in_trait)] - #[trait_variant::make(Send)] - pub trait Storage: Send + Sync { - async fn has_nonfinalized_txs(&self) -> Result; - async fn last_time_a_fragment_was_finalized(&self) -> Result>>; - async fn record_pending_tx( - &self, - tx: L1Tx, - fragment_id: NonEmpty>, - created_at: DateTime, - ) -> Result<()>; - async fn oldest_nonfinalized_fragments( - &self, - starting_height: u32, - limit: usize, - ) -> Result>; - - async fn fragments_submitted_by_tx(&self, tx_hash: [u8; 32]) - -> Result>; - async fn get_latest_pending_txs(&self) -> Result>; - } - - pub trait Clock { - fn now(&self) -> DateTime; - fn elapsed(&self, since: DateTime) -> Result { - self.now() - .signed_duration_since(since) - .to_std() - .map_err(|e| Error::Other(format!("failed to convert time: {}", e))) - } - } -} +mod fee_algo; +pub use fee_algo::{ + Config as AlgoConfig, FeeMultiplierRange, FeeThresholds, SmaFeeAlgo, SmaPeriods, +}; +pub mod port; +pub mod service; diff --git a/packages/services/src/state_committer/fee_algo.rs b/packages/services/src/state_committer/fee_algo.rs new file mode 100644 index 00000000..be5ddbb2 --- /dev/null +++ b/packages/services/src/state_committer/fee_algo.rs @@ -0,0 +1,717 @@ +use std::{ + num::{NonZeroU32, NonZeroU64}, + ops::RangeInclusive, +}; + +use tracing::info; + +use crate::{ + fee_metrics_tracker::{self}, + Error, Result, +}; + +impl

SmaFeeAlgo

+where + P: crate::fees::Api + Send + Sync, +{ + pub async fn fees_acceptable( + &self, + num_blobs: u32, + num_l2_blocks_behind: u32, + at_l1_height: u64, + ) -> Result { + if self.too_far_behind(num_l2_blocks_behind) { + info!( + "Sending because we've fallen behind by {} which is more than the configured maximum of {}", + num_l2_blocks_behind, self.config.fee_thresholds.max_l2_blocks_behind + ); + return Ok(true); + } + + // opted out of validating that num_blobs <= 6, it's not this fn's problem if the caller + // wants to send more than 6 blobs + let last_n_blocks = |n| last_n_blocks(at_l1_height, n); + + let short_term_sma = self + .fee_provider + .fees(last_n_blocks(self.config.sma_periods.short)) + .await? + .mean(); + + let long_term_sma = self + .fee_provider + .fees(last_n_blocks(self.config.sma_periods.long)) + .await? + .mean(); + + let short_term_tx_fee = + fee_metrics_tracker::service::calculate_blob_tx_fee(num_blobs, &short_term_sma); + + if self.fee_always_acceptable(short_term_tx_fee) { + info!( + "Sending because: short term price {short_term_tx_fee} is deemed always acceptable since it is <= {}", + self.config.fee_thresholds.always_acceptable_fee + ); + return Ok(true); + } + + let long_term_tx_fee = + fee_metrics_tracker::service::calculate_blob_tx_fee(num_blobs, &long_term_sma); + let max_upper_tx_fee = calculate_max_upper_fee( + &self.config.fee_thresholds, + long_term_tx_fee, + num_l2_blocks_behind, + ); + + info!( "short_term_tx_fee: {short_term_tx_fee}, long_term_tx_fee: {long_term_tx_fee}, max_upper_tx_fee: {max_upper_tx_fee}"); + + let should_send = short_term_tx_fee <= max_upper_tx_fee; + + if should_send { + info!( + "Sending because short term price {} is lower than the max upper fee {}", + short_term_tx_fee, max_upper_tx_fee + ); + } else { + info!( + "Not sending because short term price {} is higher than the max upper fee {}", + short_term_tx_fee, max_upper_tx_fee + ); + } + + Ok(should_send) + } +} + +#[derive(Debug, Clone, Copy)] 
+pub struct Config { + pub sma_periods: SmaPeriods, + pub fee_thresholds: FeeThresholds, +} + +#[cfg(feature = "test-helpers")] +impl Default for Config { + fn default() -> Self { + Self { + sma_periods: SmaPeriods { + short: 1.try_into().expect("not zero"), + long: 2.try_into().expect("not zero"), + }, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: u128::MAX, + ..Default::default() + }, + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct FeeMultiplierRange { + starting_multiplier: f64, + ending_multiplier: f64, +} + +#[cfg(feature = "test-helpers")] +impl Default for FeeMultiplierRange { + fn default() -> Self { + Self { + starting_multiplier: 1., + ending_multiplier: 1., + } + } +} + +impl FeeMultiplierRange { + pub fn new(starting_multiplier: f64, ending_multiplier: f64) -> Result { + if starting_multiplier <= 0.0 { + return Err(Error::Other(format!( + "Invalid starting multiplier value: {starting_multiplier}", + ))); + } + if ending_multiplier <= 0.0 { + return Err(Error::Other(format!( + "Invalid ending multiplier value: {ending_multiplier}", + ))); + } + + if starting_multiplier > ending_multiplier { + return Err(Error::Other(format!( + "Starting multiplier {starting_multiplier} is greater than ending multiplier {ending_multiplier}", + ))); + } + + Ok(Self { + starting_multiplier, + ending_multiplier, + }) + } + + #[cfg(feature = "test-helpers")] + pub const fn new_unchecked(starting_multiplier: f64, ending_multiplier: f64) -> Self { + Self { + starting_multiplier, + ending_multiplier, + } + } + + pub fn start_ppm(&self) -> u128 { + to_ppm(self.starting_multiplier) + } + + pub fn end_ppm(&self) -> u128 { + to_ppm(self.ending_multiplier) + } +} + +#[derive(Debug, Clone, Copy)] +pub struct FeeThresholds { + pub max_l2_blocks_behind: NonZeroU32, + pub multiplier_range: FeeMultiplierRange, + pub always_acceptable_fee: u128, +} + +#[derive(Debug, Clone, Copy)] +pub struct SmaPeriods { + pub short: 
NonZeroU64, + pub long: NonZeroU64, +} + +#[cfg(feature = "test-helpers")] +impl Default for FeeThresholds { + fn default() -> Self { + Self { + max_l2_blocks_behind: NonZeroU32::new(u32::MAX).unwrap(), + multiplier_range: FeeMultiplierRange::default(), + always_acceptable_fee: u128::MAX, + } + } +} + +#[derive(Clone)] +pub struct SmaFeeAlgo

{ + fee_provider: P, + config: Config, +} + +impl

SmaFeeAlgo

{ + pub const fn new(fee_provider: P, config: Config) -> Self { + Self { + fee_provider, + config, + } + } + + const fn too_far_behind(&self, num_l2_blocks_behind: u32) -> bool { + num_l2_blocks_behind > self.config.fee_thresholds.max_l2_blocks_behind.get() + } + + const fn fee_always_acceptable(&self, short_term_tx_fee: u128) -> bool { + short_term_tx_fee <= self.config.fee_thresholds.always_acceptable_fee + } +} + +const fn last_n_blocks(current_block: u64, n: NonZeroU64) -> RangeInclusive { + current_block.saturating_sub(n.get().saturating_sub(1))..=current_block +} + +fn calculate_max_upper_fee( + fee_thresholds: &FeeThresholds, + fee: u128, + num_l2_blocks_behind: u32, +) -> u128 { + let max_blocks_behind = u128::from(fee_thresholds.max_l2_blocks_behind.get()); + let blocks_behind = u128::from(num_l2_blocks_behind); + + debug_assert!( + blocks_behind <= max_blocks_behind, + "blocks_behind ({blocks_behind}) should not exceed max_blocks_behind ({max_blocks_behind}), it should have been handled earlier", + ); + + let multiplier_ppm = { + let start_multiplier_ppm = fee_thresholds.multiplier_range.start_ppm(); + let end_multiplier_ppm = fee_thresholds.multiplier_range.end_ppm(); + + // Linear interpolation: start + (end - start) * (blocks_behind / max_blocks_behind) + let delta_ppm = end_multiplier_ppm.saturating_sub(start_multiplier_ppm); + let increase_ppm = delta_ppm + .saturating_mul(blocks_behind) + .saturating_div(max_blocks_behind); + + let multiplier_ppm = start_multiplier_ppm.saturating_add(increase_ppm); + // safeguard against surpassing end_multiplier + multiplier_ppm.min(end_multiplier_ppm) + }; + + let max_fee = from_ppm(fee.saturating_mul(multiplier_ppm)); + { + let multiplier_perc = multiplier_ppm as f64 / 1_000_000.; + info!( "{blocks_behind}/{max_blocks_behind} blocks behind -> long term fee({fee}) * multiplier({multiplier_perc}) = max_fee({max_fee})"); + } + + max_fee +} + +const fn to_ppm(val: f64) -> u128 { + (val * 1_000_000.) 
as u128 +} + +const fn from_ppm(val: u128) -> u128 { + val.saturating_div(1_000_000) +} + +#[cfg(test)] +mod tests { + use super::{Config, SmaPeriods}; + + mod decision_making { + pub use test_case::test_case; + + use super::{Config, SmaPeriods}; + use crate::{ + fees::{testing::PreconfiguredFeeApi, Api, Fees}, + state_committer::{ + fee_algo::{FeeMultiplierRange, SmaFeeAlgo}, + FeeThresholds, + }, + }; + + struct Setup { + old_fees: Fees, + new_fees: Fees, + num_blobs: u32, + num_l2_blocks_behind: u32, + should_send: bool, + } + + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 5000, reward: 5000, base_fee_per_blob_gas: 5000}, + new_fees: Fees { base_fee_per_gas: 3000, reward: 3000, base_fee_per_blob_gas: 3000}, + num_blobs: 6, + num_l2_blocks_behind: 0, + should_send: true, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + }, + }; + "Should send because all short-term fees are lower than long-term" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 3000, reward: 3000, base_fee_per_blob_gas: 3000}, + new_fees: Fees { base_fee_per_gas: 5000, reward: 5000, base_fee_per_blob_gas: 5000}, + num_blobs: 6, + num_l2_blocks_behind: 0, + should_send: false, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + }, + }; + "Should not send because all short-term fees are higher than long-term" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 3000, reward: 3000, base_fee_per_blob_gas: 3000}, + new_fees: Fees { base_fee_per_gas: 5000, reward: 5000, base_fee_per_blob_gas: 5000}, + num_blobs: 6, + num_l2_blocks_behind: 0, + should_send: true, + }, + Config { + 
sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + always_acceptable_fee: (21_000 * (5000 + 5000)) + (6 * 131_072 * 5000) + 1, + max_l2_blocks_behind: 100.try_into().unwrap(), + ..Default::default() + } + }; + "Should send since short-term fee less than always_acceptable_fee" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 2000, reward: 10000, base_fee_per_blob_gas: 1000}, + new_fees: Fees { base_fee_per_gas: 1500, reward: 10000, base_fee_per_blob_gas: 1000}, + num_blobs: 5, + num_l2_blocks_behind: 0, + should_send: true, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + } + }; + "Should send because short-term base_fee_per_gas is lower" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 2000, reward: 10000, base_fee_per_blob_gas: 1000}, + new_fees: Fees { base_fee_per_gas: 2500, reward: 10000, base_fee_per_blob_gas: 1000}, + num_blobs: 5, + num_l2_blocks_behind: 0, + should_send: false, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + } + }; + "Should not send because short-term base_fee_per_gas is higher" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 2000, reward: 3000, base_fee_per_blob_gas: 1000}, + new_fees: Fees { base_fee_per_gas: 2000, reward: 3000, base_fee_per_blob_gas: 900}, + num_blobs: 5, + num_l2_blocks_behind: 0, + should_send: true, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + 
..Default::default() + } + }; + "Should send because short-term base_fee_per_blob_gas is lower" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 2000, reward: 3000, base_fee_per_blob_gas: 1000}, + new_fees: Fees { base_fee_per_gas: 2000, reward: 3000, base_fee_per_blob_gas: 1100}, + num_blobs: 5, + num_l2_blocks_behind: 0, + should_send: false, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + } + }; + "Should not send because short-term base_fee_per_blob_gas is higher" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 2000, reward: 10000, base_fee_per_blob_gas: 1000}, + new_fees: Fees { base_fee_per_gas: 2000, reward: 9000, base_fee_per_blob_gas: 1000}, + num_blobs: 5, + num_l2_blocks_behind: 0, + should_send: true, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + } + }; + "Should send because short-term reward is lower" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 2000, reward: 10000, base_fee_per_blob_gas: 1000}, + new_fees: Fees { base_fee_per_gas: 2000, reward: 11000, base_fee_per_blob_gas: 1000}, + num_blobs: 5, + num_l2_blocks_behind: 0, + should_send: false, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + } + }; + "Should not send because short-term reward is higher" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 3000, reward: 6000, base_fee_per_blob_gas: 5000}, + new_fees: Fees { base_fee_per_gas: 2500, reward: 5500, 
base_fee_per_blob_gas: 5000}, + num_blobs: 0, + num_l2_blocks_behind: 0, + should_send: true, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + } + }; + "Zero blobs: short-term base_fee_per_gas and reward are lower, send" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 3000, reward: 6000, base_fee_per_blob_gas: 5000}, + new_fees: Fees { base_fee_per_gas: 3000, reward: 7000, base_fee_per_blob_gas: 5000}, + num_blobs: 0, + num_l2_blocks_behind: 0, + should_send: false, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + } + }; + "Zero blobs: short-term reward is higher, don't send" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 3000, reward: 6000, base_fee_per_blob_gas: 5000}, + new_fees: Fees { base_fee_per_gas: 2000, reward: 6000, base_fee_per_blob_gas: 50_000_000}, + num_blobs: 0, + num_l2_blocks_behind: 0, + should_send: true, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + } + }; + "Zero blobs: ignore blob fee, short-term base_fee_per_gas is lower, send" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 6000, reward: 1, base_fee_per_blob_gas: 6000}, + new_fees: Fees { base_fee_per_gas: 7000, reward: 1, base_fee_per_blob_gas: 7000}, + num_blobs: 1, + num_l2_blocks_behind: 0, + should_send: false, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + 
max_l2_blocks_behind: 100.try_into().unwrap(), + multiplier_range: FeeMultiplierRange::new_unchecked(1.0, 1.2), + always_acceptable_fee: 0, + }, + }; + "Early: short-term expensive, not send" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 6000, reward: 1, base_fee_per_blob_gas: 6000}, + new_fees: Fees { base_fee_per_gas: 7000, reward: 1, base_fee_per_blob_gas: 7000}, + num_blobs: 1, + num_l2_blocks_behind: 100, + should_send: true, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + multiplier_range: FeeMultiplierRange::new_unchecked(1.0, 1.2), + always_acceptable_fee: 0, + } + }; + "Later: after max wait, send regardless" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 6000, reward: 1, base_fee_per_blob_gas: 6000}, + new_fees: Fees { base_fee_per_gas: 7000, reward: 1, base_fee_per_blob_gas: 7000}, + num_blobs: 1, + num_l2_blocks_behind: 80, + should_send: true, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + multiplier_range: FeeMultiplierRange::new_unchecked(1.0, 1.2), + always_acceptable_fee: 0, + }, + }; + "Mid-wait: increased tolerance allows acceptance" + )] + #[test_case( + Setup { + old_fees: Fees { base_fee_per_gas: 100_000, reward: 1, base_fee_per_blob_gas: 100_000}, + new_fees: Fees { base_fee_per_gas: 2_000_000, reward: 1_000_000, base_fee_per_blob_gas: 20_000_000}, + num_blobs: 1, + num_l2_blocks_behind: 0, + should_send: true, + }, + Config { + sma_periods: SmaPeriods { short: 2.try_into().unwrap(), long: 6.try_into().unwrap()}, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + multiplier_range: FeeMultiplierRange::new_unchecked(1.0, 1.2), + always_acceptable_fee: 2_700_000_000_000, + }, + }; + "Always 
acceptable fee triggers immediate send" + )] + #[tokio::test] + async fn parameterized_send_or_wait_tests( + Setup { + old_fees, + new_fees, + num_blobs, + num_l2_blocks_behind, + should_send, + }: Setup, + config: Config, + ) { + let fees = generate_fees(config.sma_periods, old_fees, new_fees); + let api = PreconfiguredFeeApi::new(fees); + let current_block_height = api.current_height().await.unwrap(); + + let sut = SmaFeeAlgo::new(api, config); + + let decision = sut + .fees_acceptable(num_blobs, num_l2_blocks_behind, current_block_height) + .await + .unwrap(); + + assert_eq!( + decision, should_send, + "For num_blobs={num_blobs}, num_l2_blocks_behind={num_l2_blocks_behind}, config={config:?}: Expected decision: {should_send}, got: {decision}", + ); + } + + fn generate_fees( + sma_periods: SmaPeriods, + old_fees: Fees, + new_fees: Fees, + ) -> Vec<(u64, Fees)> { + let older_fees = std::iter::repeat_n( + old_fees, + (sma_periods.long.get() - sma_periods.short.get()) as usize, + ); + let newer_fees = std::iter::repeat_n(new_fees, sma_periods.short.get() as usize); + + older_fees + .chain(newer_fees) + .enumerate() + .map(|(i, f)| (i as u64, f)) + .collect() + } + } + + mod upper_fee { + pub use test_case::test_case; + + use crate::state_committer::{ + fee_algo::{calculate_max_upper_fee, FeeMultiplierRange}, + FeeThresholds, + }; + struct Setup { + fee: u128, + num_l2_blocks_behind: u32, + expected_max_upper_fee: u128, + } + + #[test_case( + FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + }, + Setup { + fee: 1000, + num_l2_blocks_behind: 0, + expected_max_upper_fee: 1000 + }; + "No blocks behind, multiplier should be 1.0" + )] + #[test_case( + FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + multiplier_range: FeeMultiplierRange::new_unchecked(1.0, 1.05), + always_acceptable_fee: 0, + }, + Setup { + fee: 2000, + num_l2_blocks_behind: 50, + expected_max_upper_fee: 2050 + }; + "Half 
blocks behind with multiplier increase" + )] + #[test_case( + FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + multiplier_range: FeeMultiplierRange::new_unchecked(0.95, 1.0), + always_acceptable_fee: 0, + }, + Setup { + fee: 800, + num_l2_blocks_behind: 50, + expected_max_upper_fee: 780 + }; + "Start multiplier less than 1, no premium" + )] + #[test_case( + FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + multiplier_range: FeeMultiplierRange::new_unchecked(1.0, 1.3), + always_acceptable_fee: 0, + }, + Setup { + fee: 1000, + num_l2_blocks_behind: 50, + expected_max_upper_fee: 1150 + }; // 1.0 + (1.3 - 1.0) * (50/100) = 1.15 -> 1000 * 1.15 = 1150 + "End multiplier greater than 1, with premium" + )] + #[test_case( + FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + multiplier_range: FeeMultiplierRange::new_unchecked(1.0, 1.2), + always_acceptable_fee: 0, + }, + Setup { + fee: 10_000, + num_l2_blocks_behind: 99, + expected_max_upper_fee: 11_980 + }; + "High fee with premium" + )] + fn test_calculate_max_upper_fee( + fee_thresholds: FeeThresholds, + Setup { + fee, + num_l2_blocks_behind, + expected_max_upper_fee, + }: Setup, + ) { + let max_upper_fee = calculate_max_upper_fee(&fee_thresholds, fee, num_l2_blocks_behind); + + assert_eq!( + max_upper_fee, expected_max_upper_fee, + "Expected max_upper_fee to be {}, but got {}", + expected_max_upper_fee, max_upper_fee + ); + } + } +} diff --git a/packages/services/src/state_committer/port.rs b/packages/services/src/state_committer/port.rs new file mode 100644 index 00000000..f9be8f6a --- /dev/null +++ b/packages/services/src/state_committer/port.rs @@ -0,0 +1,77 @@ +use nonempty::NonEmpty; + +use crate::{ + types::{storage::BundleFragment, DateTime, L1Tx, NonNegative, Utc}, + Error, Result, +}; + +pub mod l1 { + use nonempty::NonEmpty; + + use crate::{ + types::{BlockSubmissionTx, Fragment, FragmentsSubmitted, L1Tx}, + Result, + }; + #[allow(async_fn_in_trait)] + 
#[trait_variant::make(Send)] + #[cfg_attr(feature = "test-helpers", mockall::automock)] + pub trait Contract: Sync { + async fn submit(&self, hash: [u8; 32], height: u32) -> Result; + } + + #[allow(async_fn_in_trait)] + #[trait_variant::make(Send)] + #[cfg_attr(feature = "test-helpers", mockall::automock)] + pub trait Api { + async fn current_height(&self) -> Result; + async fn submit_state_fragments( + &self, + fragments: NonEmpty, + previous_tx: Option, + ) -> Result<(L1Tx, FragmentsSubmitted)>; + } +} + +pub mod fuel { + pub use fuel_core_client::client::types::block::Block as FuelBlock; + + use crate::Result; + + #[allow(async_fn_in_trait)] + #[trait_variant::make(Send)] + #[cfg_attr(feature = "test-helpers", mockall::automock)] + pub trait Api: Sync { + async fn latest_height(&self) -> Result; + } +} + +#[allow(async_fn_in_trait)] +#[trait_variant::make(Send)] +pub trait Storage: Sync { + async fn has_nonfinalized_txs(&self) -> Result; + async fn last_time_a_fragment_was_finalized(&self) -> Result>>; + async fn record_pending_tx( + &self, + tx: L1Tx, + fragment_id: NonEmpty>, + created_at: DateTime, + ) -> Result<()>; + async fn oldest_nonfinalized_fragments( + &self, + starting_height: u32, + limit: usize, + ) -> Result>; + async fn latest_bundled_height(&self) -> Result>; + async fn fragments_submitted_by_tx(&self, tx_hash: [u8; 32]) -> Result>; + async fn get_latest_pending_txs(&self) -> Result>; +} + +pub trait Clock { + fn now(&self) -> DateTime; + fn elapsed(&self, since: DateTime) -> Result { + self.now() + .signed_duration_since(since) + .to_std() + .map_err(|e| Error::Other(format!("failed to convert time: {e}"))) + } +} diff --git a/packages/services/src/state_committer/service.rs b/packages/services/src/state_committer/service.rs new file mode 100644 index 00000000..afdc1d35 --- /dev/null +++ b/packages/services/src/state_committer/service.rs @@ -0,0 +1,393 @@ +use std::{num::NonZeroUsize, time::Duration}; + +use itertools::Itertools; +use 
metrics::{ + prometheus::{core::Collector, IntGauge, Opts}, + RegistersMetrics, +}; +use tracing::info; + +use super::{fee_algo::SmaFeeAlgo, AlgoConfig}; +use crate::{ + types::{storage::BundleFragment, CollectNonEmpty, DateTime, L1Tx, NonEmpty, Utc}, + Result, Runner, +}; + +// src/config.rs +#[derive(Debug, Clone)] +pub struct Config { + /// The lookback window in blocks to determine the starting height. + pub lookback_window: u32, + pub fragment_accumulation_timeout: Duration, + pub fragments_to_accumulate: NonZeroUsize, + pub gas_bump_timeout: Duration, + pub fee_algo: AlgoConfig, +} + +#[cfg(feature = "test-helpers")] +impl Default for Config { + fn default() -> Self { + Self { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(0), + fragments_to_accumulate: 1.try_into().unwrap(), + gas_bump_timeout: Duration::from_secs(300), + fee_algo: AlgoConfig::default(), + } + } +} + +struct Metrics { + current_height_to_commit: IntGauge, +} + +impl Default for Metrics { + fn default() -> Self { + let current_height_to_commit = IntGauge::with_opts(Opts::new( + "current_height_to_commit", + "The starting l2 height of the bundle we're committing/will commit next", + )) + .expect("metric config to be correct"); + + Self { + current_height_to_commit, + } + } +} + +impl RegistersMetrics for StateCommitter { + fn metrics(&self) -> Vec> { + vec![Box::new(self.metrics.current_height_to_commit.clone())] + } +} + +/// The `StateCommitter` is responsible for committing state fragments to L1. +pub struct StateCommitter { + l1_adapter: L1, + fuel_api: FuelApi, + storage: Db, + config: Config, + clock: Clock, + startup_time: DateTime, + metrics: Metrics, + fee_algo: SmaFeeAlgo, +} + +impl StateCommitter +where + Clock: crate::state_committer::port::Clock, +{ + /// Creates a new `StateCommitter`. 
+ pub fn new( + l1_adapter: L1, + fuel_api: FuelApi, + storage: Db, + config: Config, + clock: Clock, + fee_provider: FeeProvider, + ) -> Self { + let startup_time = clock.now(); + + Self { + fee_algo: SmaFeeAlgo::new(fee_provider, config.fee_algo), + l1_adapter, + fuel_api, + storage, + config, + clock, + startup_time, + metrics: Metrics::default(), + } + } +} + +impl StateCommitter +where + L1: crate::state_committer::port::l1::Api + Send + Sync, + FuelApi: crate::state_committer::port::fuel::Api, + Db: crate::state_committer::port::Storage, + Clock: crate::state_committer::port::Clock, + FeeProvider: crate::fees::Api + Sync, +{ + async fn get_reference_time(&self) -> Result> { + Ok(self + .storage + .last_time_a_fragment_was_finalized() + .await? + .unwrap_or(self.startup_time)) + } + + async fn is_timeout_expired(&self) -> Result { + let reference_time = self.get_reference_time().await?; + let elapsed = self.clock.now() - reference_time; + let std_elapsed = elapsed + .to_std() + .map_err(|e| crate::Error::Other(format!("Failed to convert time: {e}")))?; + Ok(std_elapsed >= self.config.fragment_accumulation_timeout) + } + + async fn fees_acceptable(&self, fragments: &NonEmpty) -> Result { + let l1_height = self.l1_adapter.current_height().await?; + let l2_height = self.fuel_api.latest_height().await?; + + let oldest_l2_block = Self::oldest_l2_block_in_fragments(fragments); + self.update_oldest_block_metric(oldest_l2_block); + + let num_l2_blocks_behind = l2_height.saturating_sub(oldest_l2_block); + + self.fee_algo + .fees_acceptable( + u32::try_from(fragments.len()).expect("not to send more than u32::MAX blobs"), + num_l2_blocks_behind, + l1_height, + ) + .await + } + + fn oldest_l2_block_in_fragments(fragments: &NonEmpty) -> u32 { + fragments + .minimum_by_key(|b| b.oldest_block_in_bundle) + .oldest_block_in_bundle + } + + async fn submit_fragments( + &self, + fragments: NonEmpty, + previous_tx: Option, + ) -> Result<()> { + info!("about to send at most {} 
fragments", fragments.len()); + + let data = fragments.clone().map(|f| f.fragment); + + match self + .l1_adapter + .submit_state_fragments(data, previous_tx) + .await + { + Ok((submitted_tx, submitted_fragments)) => { + let fragment_ids = fragments + .iter() + .map(|f| f.id) + .take(submitted_fragments.num_fragments.get()) + .collect_nonempty() + .expect("non-empty vec"); + + let ids = fragment_ids + .iter() + .map(|id| id.as_u32().to_string()) + .join(", "); + + let tx_hash = submitted_tx.hash; + self.storage + .record_pending_tx(submitted_tx, fragment_ids, self.clock.now()) + .await?; + + tracing::info!("Submitted fragments {ids} with tx {}", hex::encode(tx_hash)); + Ok(()) + } + Err(e) => { + let ids = fragments + .iter() + .map(|f| f.id.as_u32().to_string()) + .join(", "); + + tracing::error!("Failed to submit fragments {ids}: {e}"); + + Err(e) + } + } + } + + async fn latest_pending_transaction(&self) -> Result> { + let tx = self.storage.get_latest_pending_txs().await?; + Ok(tx) + } + + async fn next_fragments_to_submit(&self) -> Result>> { + let latest_height = self.fuel_api.latest_height().await?; + let starting_height = latest_height.saturating_sub(self.config.lookback_window); + + // although we shouldn't know at this layer how many fragments the L1 can accept, we ignore + // this for now and put the eth value of max blobs per block (6). 
+ let existing_fragments = self + .storage + .oldest_nonfinalized_fragments(starting_height, 6) + .await?; + + let fragments = NonEmpty::collect(existing_fragments); + + if let Some(fragments) = fragments.as_ref() { + // Tracking the metric here as well to get updates more often -- because + // submit_fragments might not be called + self.update_oldest_block_metric(Self::oldest_l2_block_in_fragments(fragments)); + } + + Ok(fragments) + } + + fn update_oldest_block_metric(&self, oldest_height: u32) { + self.metrics + .current_height_to_commit + .set(oldest_height.into()); + } + + async fn should_submit_fragments(&self, fragments: &NonEmpty) -> Result { + let fragment_count = fragments.len_nonzero(); + + let expired = || async { + let expired = self.is_timeout_expired().await?; + if expired { + info!( + "fragment accumulation timeout expired, available {}/{} fragments", + fragment_count, self.config.fragments_to_accumulate + ); + } + Result::Ok(expired) + }; + + let enough_fragments = || { + let enough_fragments = fragment_count >= self.config.fragments_to_accumulate; + if !enough_fragments { + info!( + "not enough fragments {}/{}", + fragment_count, self.config.fragments_to_accumulate + ); + }; + enough_fragments + }; + + // wrapped in closures so that we short-circuit *and* reduce redundant logs + Ok((enough_fragments() || expired().await?) && self.fees_acceptable(fragments).await?) + } + + async fn submit_fragments_if_ready(&self) -> Result<()> { + if let Some(fragments) = self.next_fragments_to_submit().await? { + if self.should_submit_fragments(&fragments).await? { + self.submit_fragments(fragments, None).await?; + } + } else { + // if we have no fragments to submit, that means that we're up to date and new + // blocks haven't been bundled yet + let current_height_to_commit = + if let Some(height) = self.storage.latest_bundled_height().await? { + height.saturating_add(1) + } else { + self.fuel_api + .latest_height() + .await? 
+ .saturating_sub(self.config.lookback_window) + }; + + self.metrics + .current_height_to_commit + .set(current_height_to_commit.into()); + } + + Ok(()) + } + + fn elapsed_since_tx_submitted(&self, tx: &L1Tx) -> Result { + let created_at = tx.created_at.expect("tx to have timestamp"); + + self.clock.elapsed(created_at) + } + + async fn fragments_submitted_by_tx( + &self, + tx_hash: [u8; 32], + ) -> Result> { + let fragments = self.storage.fragments_submitted_by_tx(tx_hash).await?; + + NonEmpty::collect(fragments).ok_or_else(|| { + crate::Error::Other(format!( + "no fragments found for previously submitted tx {}", + hex::encode(tx_hash) + )) + }) + } + + async fn resubmit_fragments_if_stalled(&self) -> Result<()> { + let Some(previous_tx) = self.latest_pending_transaction().await? else { + return Ok(()); + }; + + let elapsed = self.elapsed_since_tx_submitted(&previous_tx)?; + + if elapsed >= self.config.gas_bump_timeout { + info!( + "replacing tx {} because it was pending for {}s", + hex::encode(previous_tx.hash), + elapsed.as_secs() + ); + + let fragments = self.fragments_submitted_by_tx(previous_tx.hash).await?; + if self.fees_acceptable(&fragments).await? { + self.submit_fragments(fragments, Some(previous_tx)).await?; + } + } + + Ok(()) + } +} + +impl Runner + for StateCommitter +where + L1: crate::state_committer::port::l1::Api + Send + Sync, + FuelApi: crate::state_committer::port::fuel::Api + Send + Sync, + Db: crate::state_committer::port::Storage + Clone + Send + Sync, + Clock: crate::state_committer::port::Clock + Send + Sync, + FeeProvider: crate::fees::Api + Send + Sync, +{ + async fn run(&mut self) -> Result<()> { + if self.storage.has_nonfinalized_txs().await? 
{ + self.resubmit_fragments_if_stalled().await?; + } else { + self.submit_fragments_if_ready().await?; + }; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use crate::{ + fees::testing::PreconfiguredFeeApi, + state_committer::{FeeThresholds, SmaPeriods}, + }; + + #[tokio::test] + async fn test_send_when_too_far_behind_and_fee_provider_fails() { + // given + let config = AlgoConfig { + sma_periods: SmaPeriods { + short: 2.try_into().unwrap(), + long: 6.try_into().unwrap(), + }, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 10.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + }, + }; + + // having no fees will make the validation in fee analytics fail + let api = PreconfiguredFeeApi::new(vec![]); + let sut = SmaFeeAlgo::new(api, config); + + // when + let should_send = sut + .fees_acceptable(1, 20, 100) + .await + .expect("Should send despite fee provider failure"); + + // then + assert!( + should_send, + "Should send because too far behind, regardless of fee provider status" + ); + } +} diff --git a/packages/services/src/state_listener.rs b/packages/services/src/state_listener.rs index b7aa6ae4..1524d5b1 100644 --- a/packages/services/src/state_listener.rs +++ b/packages/services/src/state_listener.rs @@ -1,16 +1,17 @@ pub mod service { use std::collections::HashSet; - use crate::{ - types::{L1Tx, TransactionCostUpdate, TransactionState}, - Runner, - }; use metrics::{ prometheus::{core::Collector, IntGauge, Opts}, RegistersMetrics, }; use tracing::info; + use crate::{ + types::{L1Tx, TransactionCostUpdate, TransactionState}, + Runner, + }; + pub struct StateListener { l1_adapter: L1, storage: Db, @@ -148,7 +149,7 @@ pub mod service { self.metrics .last_eth_block_w_blob - .set(i64::try_from(tx_response.block_number()).unwrap_or(i64::MAX)) + .set(i64::try_from(tx_response.block_number()).unwrap_or(i64::MAX)); } selective_change.retain(|(_, nonce, _)| !skip_nonces.contains(nonce)); @@ -253,7 +254,7 @@ pub mod port 
{ #[allow(async_fn_in_trait)] #[trait_variant::make(Send)] - pub trait Storage: Send + Sync { + pub trait Storage: Sync { async fn get_non_finalized_txs(&self) -> Result>; async fn update_tx_states_and_costs( &self, diff --git a/packages/services/src/state_pruner.rs b/packages/services/src/state_pruner.rs index 22d24540..c4966f77 100644 --- a/packages/services/src/state_pruner.rs +++ b/packages/services/src/state_pruner.rs @@ -1,13 +1,13 @@ pub mod service { + use std::time::Duration; + use metrics::{ prometheus::{core::Collector, IntGauge}, RegistersMetrics, }; - use crate::{Result, Runner}; - use std::time::Duration; - use super::create_int_gauge; + use crate::{Result, Runner}; pub struct StatePruner { storage: Db, @@ -187,7 +187,7 @@ pub mod port { Result, }; - #[derive(Debug, Clone, PartialEq, PartialOrd)] + #[derive(Debug, Clone, PartialEq, PartialOrd, Eq)] pub struct TableSizes { pub blob_transactions: u32, pub fragments: u32, @@ -201,7 +201,7 @@ pub mod port { #[allow(async_fn_in_trait)] #[trait_variant::make(Send)] - pub trait Storage: Send + Sync { + pub trait Storage: Sync { async fn prune_entries_older_than(&self, date: DateTime) -> Result<()>; async fn table_sizes(&self) -> Result; } diff --git a/packages/services/src/types/storage.rs b/packages/services/src/types/storage.rs index 3f93e8ca..60012162 100644 --- a/packages/services/src/types/storage.rs +++ b/packages/services/src/types/storage.rs @@ -16,6 +16,7 @@ pub struct BundleFragment { pub id: NonNegative, pub idx: NonNegative, pub bundle_id: NonNegative, + pub oldest_block_in_bundle: u32, pub fragment: Fragment, } @@ -81,7 +82,7 @@ pub struct InvalidSequence { } impl InvalidSequence { - pub fn new(reason: String) -> Self { + pub const fn new(reason: String) -> Self { Self { reason } } } @@ -142,7 +143,7 @@ mod tests { fn create_non_empty_fuel_blocks(block_heights: &[u32]) -> NonEmpty { block_heights .iter() - .cloned() + .copied() .map(create_fuel_block) .collect_nonempty() .unwrap() @@ -176,7 
+177,7 @@ mod tests { let blocks = create_non_empty_fuel_blocks(&[1, 3, 2, 4, 5]); // when - let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + let seq_blocks = SequentialFuelBlocks::try_from(blocks); // then assert!( @@ -198,7 +199,7 @@ mod tests { let blocks = create_non_empty_fuel_blocks(&[1, 2, 4, 5]); // when - let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + let seq_blocks = SequentialFuelBlocks::try_from(blocks); // then assert!( @@ -218,10 +219,10 @@ mod tests { fn iterates_over_sequential_fuel_blocks_correctly() { // given let blocks = create_non_empty_fuel_blocks(&[10, 11, 12]); - let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()).unwrap(); + let seq_blocks = SequentialFuelBlocks::try_from(blocks).unwrap(); // when - let collected: Vec = seq_blocks.clone().into_iter().collect(); + let collected: Vec = seq_blocks.into_iter().collect(); // then assert_eq!( @@ -240,7 +241,7 @@ mod tests { fn indexing_returns_correct_fuel_block() { // given let blocks = create_non_empty_fuel_blocks(&[100, 101, 102, 103]); - let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()).unwrap(); + let seq_blocks = SequentialFuelBlocks::try_from(blocks).unwrap(); // when & Then assert_eq!( @@ -280,7 +281,7 @@ mod tests { fn len_returns_correct_number_of_blocks() { // given let blocks = create_non_empty_fuel_blocks(&[7, 8, 9, 10]); - let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()).unwrap(); + let seq_blocks = SequentialFuelBlocks::try_from(blocks).unwrap(); // when let length = seq_blocks.len(); @@ -298,7 +299,7 @@ mod tests { fn height_range_returns_correct_range() { // given let blocks = create_non_empty_fuel_blocks(&[20, 21, 22, 23]); - let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()).unwrap(); + let seq_blocks = SequentialFuelBlocks::try_from(blocks).unwrap(); // when let range = seq_blocks.height_range(); @@ -421,7 +422,7 @@ mod tests { let blocks = nonempty![create_fuel_block(1), 
create_fuel_block(1)]; // when - let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + let seq_blocks = SequentialFuelBlocks::try_from(blocks); // then assert!( @@ -443,7 +444,7 @@ mod tests { let blocks = nonempty![create_fuel_block(1), create_fuel_block(3)]; // when - let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + let seq_blocks = SequentialFuelBlocks::try_from(blocks); // then assert!( diff --git a/packages/services/src/wallet_balance_tracker.rs b/packages/services/src/wallet_balance_tracker.rs index dd5f5526..55715775 100644 --- a/packages/services/src/wallet_balance_tracker.rs +++ b/packages/services/src/wallet_balance_tracker.rs @@ -1,16 +1,15 @@ pub mod service { use std::collections::HashMap; - use crate::{ - types::{Address, U256}, - Result, - }; use metrics::{ prometheus::{core::Collector, IntGauge, Opts}, RegistersMetrics, }; - use crate::Runner; + use crate::{ + types::{Address, U256}, + Result, Runner, + }; struct Balance { gauge: IntGauge, @@ -29,7 +28,7 @@ pub mod service { pub fn new(api: Api) -> Self { Self { api, - tracking: Default::default(), + tracking: HashMap::default(), } } @@ -99,16 +98,19 @@ mod tests { use std::str::FromStr; - use crate::types::Address; use alloy::primitives::U256; use metrics::{ - prometheus::{proto::Metric, Registry}, + prometheus::{ + proto::{Metric, MetricFamily}, + Registry, + }, RegistersMetrics, }; use mockall::predicate::eq; use service::WalletBalanceTracker; use super::*; + use crate::types::Address; #[tokio::test] async fn updates_metrics() { @@ -144,7 +146,7 @@ mod tests { let eth_balance_metric = metrics .iter() .filter(|metric_group| metric_group.get_name() == "wallet_balance") - .flat_map(|metric_group| metric_group.get_metric()) + .flat_map(MetricFamily::get_metric) .filter(|metric| { metric.get_label().iter().any(|label| { label.get_name() == "usage" && (label.get_value() == expected_label_value) diff --git a/packages/services/tests/block_bundler.rs 
b/packages/services/tests/block_bundler.rs index 802e9195..4ae2218b 100644 --- a/packages/services/tests/block_bundler.rs +++ b/packages/services/tests/block_bundler.rs @@ -223,8 +223,9 @@ async fn does_nothing_if_not_enough_blocks() -> Result<()> { #[tokio::test] async fn stops_accumulating_blocks_if_time_runs_out_measured_from_component_creation() -> Result<()> { - use services::block_bundler::port::Storage as BundlerStorage; - use services::state_committer::port::Storage; + use services::{ + block_bundler::port::Storage as BundlerStorage, state_committer::port::Storage, + }; // given let setup = test_helpers::Setup::init().await; @@ -561,8 +562,9 @@ async fn doesnt_stop_advancing_if_there_is_still_time_to_optimize() -> Result<() #[tokio::test] async fn skips_blocks_outside_lookback_window() -> Result<()> { - use services::block_bundler::port::Storage as BundlerStorage; - use services::state_committer::port::Storage; + use services::{ + block_bundler::port::Storage as BundlerStorage, state_committer::port::Storage, + }; // given let setup = test_helpers::Setup::init().await; diff --git a/packages/services/tests/block_committer.rs b/packages/services/tests/block_committer.rs index 0002df32..708113a8 100644 --- a/packages/services/tests/block_committer.rs +++ b/packages/services/tests/block_committer.rs @@ -1,11 +1,11 @@ -use services::types::{TransactionResponse, TransactionState, Utc}; use services::{ block_committer::{port::Storage, service::BlockCommitter}, + types::{TransactionResponse, TransactionState, Utc}, Runner, }; -use test_helpers::{ - mocks::fuel::{given_a_block, given_fetcher, given_secret_key}, - mocks::l1::{expects_contract_submission, expects_transaction_response, FullL1Mock}, +use test_helpers::mocks::{ + fuel::{given_a_block, given_fetcher, given_secret_key}, + l1::{expects_contract_submission, expects_transaction_response, FullL1Mock}, }; #[tokio::test] diff --git a/packages/services/tests/state_committer.rs 
b/packages/services/tests/state_committer.rs index 831dbeb5..7b9b7d95 100644 --- a/packages/services/tests/state_committer.rs +++ b/packages/services/tests/state_committer.rs @@ -1,8 +1,14 @@ +use std::{iter::repeat, time::Duration}; + +use itertools::Itertools; +use metrics::{prometheus, RegistersMetrics}; use services::{ + fees::Fees, + state_committer::{AlgoConfig, FeeThresholds, SmaPeriods}, types::{L1Tx, NonEmpty}, Result, Runner, StateCommitter, StateCommitterConfig, }; -use std::time::Duration; +use test_helpers::{mocks, noop_fees, preconfigured_fees}; #[tokio::test] async fn submits_fragments_when_required_count_accumulated() -> Result<()> { @@ -11,15 +17,13 @@ async fn submits_fragments_when_required_count_accumulated() -> Result<()> { let fragments = setup.insert_fragments(0, 4).await; - let tx_hash = [0; 32]; - let l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( + let mut l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( Some(NonEmpty::from_vec(fragments.clone()).unwrap()), - L1Tx { - hash: tx_hash, - nonce: 0, - ..Default::default() - }, + L1Tx::default(), )]); + l1_mock_submit + .expect_current_height() + .returning(|| Box::pin(async { Ok(0) })); let fuel_mock = test_helpers::mocks::fuel::latest_height_is(0); let mut state_committer = StateCommitter::new( @@ -33,6 +37,7 @@ async fn submits_fragments_when_required_count_accumulated() -> Result<()> { ..Default::default() }, setup.test_clock(), + noop_fees(), ); // when @@ -52,7 +57,7 @@ async fn submits_fragments_on_timeout_before_accumulation() -> Result<()> { let fragments = setup.insert_fragments(0, 5).await; // Only 5 fragments, less than required let tx_hash = [1; 32]; - let l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( + let mut l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( Some(NonEmpty::from_vec(fragments.clone()).unwrap()), L1Tx { hash: tx_hash, @@ -61,6 +66,9 @@ async fn 
submits_fragments_on_timeout_before_accumulation() -> Result<()> { }, )]); + l1_mock_submit + .expect_current_height() + .returning(|| Box::pin(async { Ok(0) })); let fuel_mock = test_helpers::mocks::fuel::latest_height_is(0); let mut state_committer = StateCommitter::new( l1_mock_submit, @@ -73,6 +81,7 @@ async fn submits_fragments_on_timeout_before_accumulation() -> Result<()> { ..Default::default() }, test_clock.clone(), + noop_fees(), ); // Advance time beyond the timeout @@ -108,6 +117,7 @@ async fn does_not_submit_fragments_before_required_count_or_timeout() -> Result< ..Default::default() }, test_clock.clone(), + noop_fees(), ); // Advance time less than the timeout @@ -129,7 +139,7 @@ async fn submits_fragments_when_required_count_before_timeout() -> Result<()> { let fragments = setup.insert_fragments(0, 5).await; let tx_hash = [3; 32]; - let l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( + let mut l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( Some(NonEmpty::from_vec(fragments).unwrap()), L1Tx { hash: tx_hash, @@ -137,6 +147,9 @@ async fn submits_fragments_when_required_count_before_timeout() -> Result<()> { ..Default::default() }, )]); + l1_mock_submit + .expect_current_height() + .returning(|| Box::pin(async { Ok(0) })); let fuel_mock = test_helpers::mocks::fuel::latest_height_is(0); let mut state_committer = StateCommitter::new( @@ -150,6 +163,7 @@ async fn submits_fragments_when_required_count_before_timeout() -> Result<()> { ..Default::default() }, setup.test_clock(), + noop_fees(), ); // when @@ -172,7 +186,7 @@ async fn timeout_measured_from_last_finalized_fragment() -> Result<()> { let fragments_to_submit = setup.insert_fragments(1, 2).await; let tx_hash = [4; 32]; - let l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( + let mut l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( Some(NonEmpty::from_vec(fragments_to_submit).unwrap()), L1Tx { hash: tx_hash, @@ 
-180,6 +194,9 @@ async fn timeout_measured_from_last_finalized_fragment() -> Result<()> { ..Default::default() }, )]); + l1_mock_submit + .expect_current_height() + .returning(|| Box::pin(async { Ok(1) })); let fuel_mock = test_helpers::mocks::fuel::latest_height_is(1); let mut state_committer = StateCommitter::new( @@ -193,6 +210,7 @@ async fn timeout_measured_from_last_finalized_fragment() -> Result<()> { ..Default::default() }, test_clock.clone(), + noop_fees(), ); // Advance time to exceed the timeout since last finalized fragment @@ -215,7 +233,7 @@ async fn timeout_measured_from_startup_if_no_finalized_fragment() -> Result<()> let fragments = setup.insert_fragments(0, 5).await; // Only 5 fragments, less than required let tx_hash = [5; 32]; - let l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( + let mut l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( Some(NonEmpty::from_vec(fragments.clone()).unwrap()), L1Tx { hash: tx_hash, @@ -225,6 +243,9 @@ async fn timeout_measured_from_startup_if_no_finalized_fragment() -> Result<()> )]); let fuel_mock = test_helpers::mocks::fuel::latest_height_is(0); + l1_mock_submit + .expect_current_height() + .returning(|| Box::pin(async { Ok(1) })); let mut state_committer = StateCommitter::new( l1_mock_submit, fuel_mock, @@ -236,6 +257,7 @@ async fn timeout_measured_from_startup_if_no_finalized_fragment() -> Result<()> ..Default::default() }, test_clock.clone(), + noop_fees(), ); // Advance time beyond the timeout from startup @@ -259,7 +281,7 @@ async fn resubmits_fragments_when_gas_bump_timeout_exceeded() -> Result<()> { let tx_hash_1 = [6; 32]; let tx_hash_2 = [7; 32]; - let l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([ + let mut l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([ ( Some(NonEmpty::from_vec(fragments.clone()).unwrap()), L1Tx { @@ -278,6 +300,10 @@ async fn resubmits_fragments_when_gas_bump_timeout_exceeded() -> Result<()> { ), 
]); + l1_mock_submit + .expect_current_height() + .returning(|| Box::pin(async { Ok(0) })); + let fuel_mock = test_helpers::mocks::fuel::latest_height_is(0); let mut state_committer = StateCommitter::new( l1_mock_submit, @@ -291,6 +317,7 @@ async fn resubmits_fragments_when_gas_bump_timeout_exceeded() -> Result<()> { ..Default::default() }, test_clock.clone(), + noop_fees(), ); // Submit the initial fragments @@ -307,3 +334,397 @@ async fn resubmits_fragments_when_gas_bump_timeout_exceeded() -> Result<()> { // Mocks validate that the fragments have been sent again Ok(()) } + +#[tokio::test] +async fn sends_transaction_when_short_term_fee_favorable() -> Result<()> { + // given + let setup = test_helpers::Setup::init().await; + + let expensive = Fees { + base_fee_per_gas: 5000.try_into().unwrap(), + reward: 5000.try_into().unwrap(), + base_fee_per_blob_gas: 5000.try_into().unwrap(), + }; + let cheap = Fees { + base_fee_per_gas: 3000.try_into().unwrap(), + reward: 3000.try_into().unwrap(), + base_fee_per_blob_gas: 3000.try_into().unwrap(), + }; + let fee_sequence = vec![ + (0, expensive), + (1, expensive), + (2, expensive), + (3, expensive), + (4, cheap), + (5, cheap), + ]; + + let config = AlgoConfig { + sma_periods: SmaPeriods { + short: 2.try_into().unwrap(), + long: 6.try_into().unwrap(), + }, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: u32::MAX.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + }, + }; + + // enough fragments to meet the accumulation threshold + let fragments = setup.insert_fragments(0, 6).await; + + let mut l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( + Some(NonEmpty::from_vec(fragments.clone()).unwrap()), + L1Tx::default(), + )]); + l1_mock_submit + .expect_current_height() + .returning(|| Box::pin(async { Ok(5) })); + + let fuel_mock = test_helpers::mocks::fuel::latest_height_is(100); + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + 
StateCommitterConfig { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 6.try_into().unwrap(), + fee_algo: config, + ..Default::default() + }, + setup.test_clock(), + preconfigured_fees(fee_sequence), + ); + + // when + state_committer.run().await?; + + // then + // mocks validate that the fragments have been sent + Ok(()) +} + +#[tokio::test] +async fn does_not_send_transaction_when_short_term_fee_unfavorable() -> Result<()> { + // given + let setup = test_helpers::Setup::init().await; + + let expensive = Fees { + base_fee_per_gas: 5000.try_into().unwrap(), + reward: 5000.try_into().unwrap(), + base_fee_per_blob_gas: 5000.try_into().unwrap(), + }; + let cheap = Fees { + base_fee_per_gas: 3000.try_into().unwrap(), + reward: 3000.try_into().unwrap(), + base_fee_per_blob_gas: 3000.try_into().unwrap(), + }; + + let fee_sequence = vec![ + (0, cheap), + (1, cheap), + (2, cheap), + (3, cheap), + (4, expensive), + (5, expensive), + ]; + + let fee_algo = AlgoConfig { + sma_periods: SmaPeriods { + short: 2.try_into().unwrap(), + long: 6.try_into().unwrap(), + }, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: u32::MAX.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + }, + }; + + // enough fragments to meet the accumulation threshold + let _fragments = setup.insert_fragments(0, 6).await; + + let mut l1_mock = test_helpers::mocks::l1::expects_state_submissions([]); + l1_mock + .expect_current_height() + .returning(|| Box::pin(async { Ok(5) })); + + let fuel_mock = test_helpers::mocks::fuel::latest_height_is(6); + let mut state_committer = StateCommitter::new( + l1_mock, + fuel_mock, + setup.db(), + StateCommitterConfig { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 6.try_into().unwrap(), + fee_algo, + ..Default::default() + }, + setup.test_clock(), + preconfigured_fees(fee_sequence), + ); + + // when + 
state_committer.run().await?; + + // then + // mocks validate that no fragments have been sent + Ok(()) +} + +#[tokio::test] +async fn sends_transaction_when_l2_blocks_behind_exceeds_max() -> Result<()> { + // given + let setup = test_helpers::Setup::init().await; + + // high fees to ensure that without the behind condition, it wouldn't send + let expensive = Fees { + base_fee_per_gas: 5000.try_into().unwrap(), + reward: 5000.try_into().unwrap(), + base_fee_per_blob_gas: 5000.try_into().unwrap(), + }; + + let super_expensive = Fees { + base_fee_per_gas: 7000.try_into().unwrap(), + reward: 7000.try_into().unwrap(), + base_fee_per_blob_gas: 7000.try_into().unwrap(), + }; + let expensive_seq = (0..=3).zip(repeat(expensive)); + let super_expen_seq = (4..=5).zip(repeat(super_expensive)); + let fee_sequence = expensive_seq.chain(super_expen_seq).collect_vec(); + + let fee_algo = AlgoConfig { + sma_periods: SmaPeriods { + short: 2.try_into().unwrap(), + long: 6.try_into().unwrap(), + }, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 50.try_into().unwrap(), + always_acceptable_fee: 0, + ..Default::default() + }, + }; + + // enough fragments to meet the accumulation threshold + let fragments = setup.insert_fragments(0, 6).await; + + let mut l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( + Some(NonEmpty::from_vec(fragments.clone()).unwrap()), + L1Tx::default(), + )]); + l1_mock_submit + .expect_current_height() + .returning(|| Box::pin(async { Ok(5) })); + + let fuel_mock = test_helpers::mocks::fuel::latest_height_is(51); + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + StateCommitterConfig { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 6.try_into().unwrap(), + fee_algo, + ..Default::default() + }, + setup.test_clock(), + preconfigured_fees(fee_sequence), + ); + + // when + state_committer.run().await?; + + // then + // Mocks 
validate that the fragments have been sent despite high fees + Ok(()) +} + +#[tokio::test] +async fn sends_transaction_when_nearing_max_blocks_behind_with_increased_tolerance() -> Result<()> { + // given + let setup = test_helpers::Setup::init().await; + + let normal = Fees { + base_fee_per_gas: 5000.try_into().unwrap(), + reward: 5000.try_into().unwrap(), + base_fee_per_blob_gas: 5000.try_into().unwrap(), + }; + + let slightly_more_expensive = Fees { + base_fee_per_gas: 5800.try_into().unwrap(), + reward: 5800.try_into().unwrap(), + base_fee_per_blob_gas: 5800.try_into().unwrap(), + }; + let fee_sequence = vec![ + (95, normal), + (96, normal), + (97, normal), + (98, normal), + (99, slightly_more_expensive), + (100, slightly_more_expensive), + ]; + + let fee_algo = AlgoConfig { + sma_periods: SmaPeriods { + short: 2.try_into().unwrap(), + long: 5.try_into().unwrap(), + }, + fee_thresholds: FeeThresholds { + max_l2_blocks_behind: 100.try_into().unwrap(), + multiplier_range: services::state_committer::FeeMultiplierRange::new_unchecked( + 0.80, 1.20, + ), + always_acceptable_fee: 0, + }, + }; + + let fragments = setup.insert_fragments(0, 6).await; + + let mut l1_mock_submit = test_helpers::mocks::l1::expects_state_submissions([( + Some(NonEmpty::from_vec(fragments.clone()).unwrap()), + L1Tx::default(), + )]); + l1_mock_submit + .expect_current_height() + .returning(|| Box::pin(async { Ok(100) })); + + let fuel_mock = test_helpers::mocks::fuel::latest_height_is(80); + + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + StateCommitterConfig { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 6.try_into().unwrap(), + fee_algo, + ..Default::default() + }, + setup.test_clock(), + preconfigured_fees(fee_sequence), + ); + + // when + state_committer.run().await?; + + // then + // Mocks validate that the fragments have been sent due to increased tolerance from nearing max 
blocks behind + Ok(()) +} + +#[tokio::test] +async fn updates_current_height_to_commit_metric_with_latest_bundled_height() -> Result<()> { + // given + let setup = test_helpers::Setup::init().await; + let test_clock = setup.test_clock(); + + setup.commit_block_bundle([0; 32], 0, 100).await; + + let l1_mock_submit = mocks::l1::expects_state_submissions(vec![]); + + let fuel_mock = mocks::fuel::latest_height_is(150); + + let registry = prometheus::Registry::new(); + + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + StateCommitterConfig { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 10.try_into().unwrap(), + ..Default::default() + }, + test_clock.clone(), + noop_fees(), + ); + + state_committer.register_metrics(&registry); + + // when + state_committer.run().await?; + + // then + let gathered_metrics = registry.gather(); + let metric = gathered_metrics + .iter() + .find(|m| m.get_name() == "current_height_to_commit") + .expect("Metric `current_height_to_commit` should be present"); + + // Extract the gauge value + let metric_value = metric + .get_metric() + .iter() + .next() + .and_then(|m| m.get_gauge().get_value().into()) + .expect("Metric `current_height_to_commit` should have a value"); + + assert_eq!( + metric_value, 101.0, + "current_height_to_commit should be set to latest_bundled_height + 1 (100 + 1 = 101)" + ); + + Ok(()) +} + +#[tokio::test] +async fn updates_current_height_to_commit_metric_without_latest_bundled_height() -> Result<()> { + // given + let setup = test_helpers::Setup::init().await; + let test_clock = setup.test_clock(); + + // Do NOT commit any block, leaving `latest_bundled_height` as None + + let l1_mock_submit = mocks::l1::expects_state_submissions(vec![]); + + let fuel_mock = mocks::fuel::latest_height_is(150); + + let registry = prometheus::Registry::new(); + + let mut state_committer = StateCommitter::new( + l1_mock_submit, + 
fuel_mock, + setup.db(), + StateCommitterConfig { + lookback_window: 100, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 10.try_into().unwrap(), + ..Default::default() + }, + test_clock.clone(), + noop_fees(), + ); + + state_committer.register_metrics(&registry); + + // when + state_committer.run().await?; + + // then + let gathered_metrics = registry.gather(); + let metric = gathered_metrics + .iter() + .find(|m| m.get_name() == "current_height_to_commit") + .expect("Metric `current_height_to_commit` should be present"); + + // Extract the gauge value + let metric_value = metric + .get_metric() + .iter() + .next() + .and_then(|m| m.get_gauge().get_value().into()) + .expect("Metric `current_height_to_commit` should have a value"); + + assert_eq!( + metric_value, 50., + "current_height_to_commit should be set to latest_height - lookback_window (150 - 100 = 50)" + ); + + Ok(()) +} diff --git a/packages/services/tests/state_listener.rs b/packages/services/tests/state_listener.rs index b60460cc..6cf62cd4 100644 --- a/packages/services/tests/state_listener.rs +++ b/packages/services/tests/state_listener.rs @@ -8,7 +8,10 @@ use services::{ Result, Runner, StateCommitter, StateCommitterConfig, }; use test_case::test_case; -use test_helpers::mocks::{self, l1::TxStatus}; +use test_helpers::{ + mocks::{self, l1::TxStatus}, + noop_fees, +}; #[tokio::test] async fn successful_finalized_tx() -> Result<()> { @@ -438,8 +441,14 @@ async fn block_inclusion_of_replacement_leaves_no_pending_txs() -> Result<()> { nonce, ..Default::default() }; + let mut l1_mock = + mocks::l1::expects_state_submissions(vec![(None, orig_tx), (None, replacement_tx)]); + l1_mock + .expect_current_height() + .returning(|| Box::pin(async { Ok(0) })); + let mut committer = StateCommitter::new( - mocks::l1::expects_state_submissions(vec![(None, orig_tx), (None, replacement_tx)]), + l1_mock, mocks::fuel::latest_height_is(0), setup.db(), StateCommitterConfig { @@ -447,6 +456,7 
@@ async fn block_inclusion_of_replacement_leaves_no_pending_txs() -> Result<()> { ..Default::default() }, test_clock.clone(), + noop_fees(), ); // Orig tx @@ -535,8 +545,14 @@ async fn finalized_replacement_tx_will_leave_no_pending_tx( ..Default::default() }; + let mut l1_mock = + mocks::l1::expects_state_submissions(vec![(None, orig_tx), (None, replacement_tx)]); + l1_mock + .expect_current_height() + .returning(|| Box::pin(async { Ok(0) })); + let mut committer = StateCommitter::new( - mocks::l1::expects_state_submissions(vec![(None, orig_tx), (None, replacement_tx)]), + l1_mock, mocks::fuel::latest_height_is(0), setup.db(), crate::StateCommitterConfig { @@ -544,6 +560,7 @@ async fn finalized_replacement_tx_will_leave_no_pending_tx( ..Default::default() }, test_clock.clone(), + noop_fees(), ); // Orig tx diff --git a/packages/services/tests/status_reporter.rs b/packages/services/tests/status_reporter.rs index faebed55..f75366bd 100644 --- a/packages/services/tests/status_reporter.rs +++ b/packages/services/tests/status_reporter.rs @@ -2,8 +2,10 @@ use std::sync::Arc; use clock::TestClock; use rand::Rng; -use services::status_reporter::service::{Status, StatusReport, StatusReporter}; -use services::types::{BlockSubmission, BlockSubmissionTx}; +use services::{ + status_reporter::service::{Status, StatusReport, StatusReporter}, + types::{BlockSubmission, BlockSubmissionTx}, +}; use storage::PostgresProcess; #[tokio::test] diff --git a/packages/test-helpers/src/lib.rs b/packages/test-helpers/src/lib.rs index f73fc475..c1c2b263 100644 --- a/packages/test-helpers/src/lib.rs +++ b/packages/test-helpers/src/lib.rs @@ -8,16 +8,18 @@ use fuel_block_committer_encoding::bundle::{self, CompressionLevel}; use metrics::prometheus::IntGauge; use mocks::l1::TxStatus; use rand::{Rng, RngCore}; -use services::types::{ - BlockSubmission, CollectNonEmpty, CompressedFuelBlock, Fragment, L1Tx, NonEmpty, -}; -use storage::{DbWithProcess, PostgresProcess}; - -use 
services::{block_committer::service::BlockCommitter, Runner}; use services::{ - block_importer::service::BlockImporter, state_listener::service::StateListener, BlockBundler, - BlockBundlerConfig, BundlerFactory, StateCommitter, + block_committer::service::BlockCommitter, + block_importer::service::BlockImporter, + fees::{ + testing::{ConstantFeeApi, PreconfiguredFeeApi}, + Fees, + }, + state_listener::service::StateListener, + types::{BlockSubmission, CollectNonEmpty, CompressedFuelBlock, Fragment, L1Tx, NonEmpty}, + BlockBundler, BlockBundlerConfig, BundlerFactory, Runner, StateCommitter, }; +use storage::{DbWithProcess, PostgresProcess}; pub fn random_data(size: impl Into) -> NonEmpty { let size = size.into(); @@ -484,6 +486,16 @@ pub mod mocks { } } +pub fn noop_fees() -> ConstantFeeApi { + ConstantFeeApi::new(Fees::default()) +} + +pub fn preconfigured_fees( + fee_sequence: impl IntoIterator, +) -> PreconfiguredFeeApi { + PreconfiguredFeeApi::new(fee_sequence) +} + pub struct Setup { db: DbWithProcess, test_clock: TestClock, @@ -531,15 +543,20 @@ impl Setup { } pub async fn send_fragments(&self, eth_tx: [u8; 32], eth_nonce: u32) { + let mut l1_mock = mocks::l1::expects_state_submissions(vec![( + None, + L1Tx { + hash: eth_tx, + nonce: eth_nonce, + ..Default::default() + }, + )]); + l1_mock + .expect_current_height() + .return_once(move || Box::pin(async { Ok(0) })); + StateCommitter::new( - mocks::l1::expects_state_submissions(vec![( - None, - L1Tx { - hash: eth_tx, - nonce: eth_nonce, - ..Default::default() - }, - )]), + l1_mock, mocks::fuel::latest_height_is(0), self.db(), services::StateCommitterConfig { @@ -547,9 +564,10 @@ impl Setup { fragment_accumulation_timeout: Duration::from_secs(0), fragments_to_accumulate: 1.try_into().unwrap(), gas_bump_timeout: Duration::from_secs(300), - tx_max_fee: 1_000_000_000, + ..Default::default() }, self.test_clock.clone(), + noop_fees(), ) .run() .await @@ -563,7 +581,7 @@ impl Setup { pub async fn 
commit_block_bundle(&self, eth_tx: [u8; 32], eth_nonce: u32, height: u32) { self.insert_fragments(height, 6).await; - let l1_mock = mocks::l1::expects_state_submissions(vec![( + let mut l1_mock = mocks::l1::expects_state_submissions(vec![( None, L1Tx { hash: eth_tx, @@ -571,6 +589,10 @@ impl Setup { ..Default::default() }, )]); + l1_mock + .expect_current_height() + .return_once(move || Box::pin(async { Ok(0) })); + let fuel_mock = mocks::fuel::latest_height_is(height); let mut committer = StateCommitter::new( l1_mock, @@ -581,9 +603,10 @@ impl Setup { fragment_accumulation_timeout: Duration::from_secs(0), fragments_to_accumulate: 1.try_into().unwrap(), gas_bump_timeout: Duration::from_secs(300), - tx_max_fee: 1_000_000_000, + ..Default::default() }, self.test_clock.clone(), + noop_fees(), ); committer.run().await.unwrap(); diff --git a/run_tests.sh b/run_tests.sh index 3280743e..9c074973 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -9,4 +9,4 @@ cargo test --manifest-path "$workspace_cargo_manifest" --workspace --exclude e2e # So that we may have a binary in `target/release` cargo build --release --manifest-path "$workspace_cargo_manifest" --bin fuel-block-committer -PATH="$script_location/target/release:$PATH" cargo test --manifest-path "$workspace_cargo_manifest" --package e2e -- --test-threads=1 +PATH="$script_location/target/release:$PATH" cargo test --manifest-path "$workspace_cargo_manifest" --package e2e -- --test-threads=1 --nocapture