+ pub client: Arc<C>,
+ /// Transaction pool instance.
+ pub pool: Arc<P>,
+ /// Graph pool instance.
+ pub graph: Arc<Pool<A>>,
+ /// Ethereum transaction converter.
+ pub converter: Option<CT>,
+ /// The Node authority flag
+ pub is_authority: bool,
+ /// Whether to enable dev signer
+ pub enable_dev_signer: bool,
+ /// Network service
+ pub network: Arc<NetworkService<B, B::Hash>>,
+ /// Chain syncing service
+ pub sync_service: Arc<SyncingService<B>>,
+ /// Frontier Backend.
+ pub frontier_backend: Arc<dyn fc_api::Backend<B>>,
+ /// Ethereum data access overrides.
+ pub storage_override: Arc<dyn StorageOverride<B>>,
+ /// Cache for Ethereum block data.
+ pub block_data_cache: Arc<EthBlockDataCacheTask<B>>,
+ /// EthFilterApi pool.
+ pub filter_pool: FilterPool,
+ /// Maximum number of logs in a query.
+ pub max_past_logs: u32,
+ /// Fee history cache.
+ pub fee_history_cache: FeeHistoryCache,
+ /// Maximum fee history cache size.
+ pub fee_history_cache_limit: FeeHistoryCacheLimit,
+ /// Maximum allowed gas limit will be `block.gas_limit * execute_gas_limit_multiplier` when
+ /// using eth_call/eth_estimateGas.
+ pub execute_gas_limit_multiplier: u64,
+}
+
+/// Instantiate Ethereum-compatible RPC extensions.
+pub fn create_eth<B: BlockT, C, BE, P, A, CT>(
+ mut io: RpcModule<()>,
+ deps: EthDeps<B, C, P, A, CT>,
+ subscription_task_executor: SubscriptionTaskExecutor,
+ pubsub_notification_sinks: Arc<
+ fc_mapping_sync::EthereumBlockNotificationSinks<
+ fc_mapping_sync::EthereumBlockNotification<B>,
+ >,
+ >,
+) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
+where
+ C: ProvideRuntimeApi<B>,
+ C::Api:
+ BlockBuilderApi<B> + EthereumRuntimeRPCApi<B> + ConvertTransactionRuntimeApi<B>,
+ C: BlockchainEvents<B> + 'static,
+ C: HeaderBackend<B>
+ + HeaderMetadata<B, Error = BlockChainError>
+ + StorageProvider<B, BE>,
+ C: CallApiAt<B>,
+ BE: Backend<B> + 'static,
+ BE::State: StateBackend<BlakeTwo256>,
+ P: TransactionPool<Block = B> + 'static,
+ A: ChainApi<Block = B> + 'static,
+ CT: ConvertTransaction<<B as BlockT>::Extrinsic> + Send + Sync + 'static,
+{
+ use fc_rpc::{
+ Debug, DebugApiServer, Eth, EthApiServer, EthDevSigner, EthFilter, EthFilterApiServer,
+ EthPubSub, EthPubSubApiServer, EthSigner, Net, NetApiServer, TxPool, TxPoolApiServer, Web3,
+ Web3ApiServer,
+ };
+
+ let EthDeps {
+ client,
+ pool,
+ graph,
+ converter,
+ is_authority,
+ enable_dev_signer,
+ network,
+ sync_service,
+ frontier_backend,
+ storage_override,
+ block_data_cache,
+ filter_pool,
+ max_past_logs,
+ fee_history_cache,
+ fee_history_cache_limit,
+ execute_gas_limit_multiplier,
+ } = deps;
+
+ let mut signers = Vec::new();
+ if enable_dev_signer {
+ signers.push(Box::new(EthDevSigner::new()) as Box<dyn EthSigner>);
+ }
+
+ let pending_create_inherent_data_providers = move |_, _| async move {
+ let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+ // Create a dummy parachain inherent data provider which is required to pass
+ // the checks by the parachain system. We use dummy values because in the 'pending context'
+ // neither do we have access to the real values nor do we need them.
+ let (relay_parent_storage_root, relay_chain_state) =
+ RelayStateSproofBuilder::default().into_state_root_and_proof();
+ let vfp = PersistedValidationData {
+ // This is a hack to make
+ // `cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases` happy. Relay parent
+ // number can't be bigger than u32::MAX.
+ relay_parent_number: u32::MAX,
+ relay_parent_storage_root,
+ ..Default::default()
+ };
+ let parachain_inherent_data = ParachainInherentData {
+ validation_data: vfp,
+ relay_chain_state,
+ downward_messages: Default::default(),
+ horizontal_messages: Default::default(),
+ };
+ Ok((timestamp, parachain_inherent_data))
+ };
+
+ io.merge(
+ Eth::<_, _, _, _, _, _, _, BifrostEthConfig<_, _>>::new(
+ client.clone(),
+ pool.clone(),
+ graph.clone(),
+ converter,
+ sync_service.clone(),
+ signers,
+ storage_override.clone(),
+ frontier_backend.clone(),
+ is_authority,
+ block_data_cache.clone(),
+ fee_history_cache,
+ fee_history_cache_limit,
+ execute_gas_limit_multiplier,
+ None,
+ pending_create_inherent_data_providers,
+ None,
+ )
+ .replace_config::<BifrostEthConfig<C, BE>>()
+ .into_rpc(),
+ )?;
+
+ io.merge(
+ EthFilter::new(
+ client.clone(),
+ frontier_backend.clone(),
+ graph.clone(),
+ filter_pool,
+ 500_usize, // max stored filters
+ max_past_logs,
+ block_data_cache.clone(),
+ )
+ .into_rpc(),
+ )?;
+
+ io.merge(
+ EthPubSub::new(
+ pool,
+ client.clone(),
+ sync_service,
+ subscription_task_executor,
+ storage_override.clone(),
+ pubsub_notification_sinks,
+ )
+ .into_rpc(),
+ )?;
+
+ io.merge(
+ Net::new(
+ client.clone(),
+ network,
+ // Whether to format the `peer_count` response as Hex (default) or not.
+ true,
+ )
+ .into_rpc(),
+ )?;
+
+ io.merge(Web3::new(client.clone()).into_rpc())?;
+
+ io.merge(
+ Debug::new(client.clone(), frontier_backend, storage_override, block_data_cache).into_rpc(),
+ )?;
+
+ io.merge(TxPool::new(client, graph).into_rpc())?;
+
+ Ok(io)
+}
diff --git a/node/rpc/src/lib.rs b/node/rpc/src/lib.rs
index 0fef634c8..41dc9059b 100644
--- a/node/rpc/src/lib.rs
+++ b/node/rpc/src/lib.rs
@@ -26,7 +26,7 @@
//!
//! The RPCs available in this crate however can make some assumptions
//! about how the runtime is constructed and what FRAME pallets
-//! are part of it. Therefore all node-runtime-specific RPCs can
+//! are part of it. Therefore, all node-runtime-specific RPCs can
//! be placed here or imported from corresponding FRAME RPC definitions.
#![warn(missing_docs)]
@@ -37,6 +37,7 @@ use bifrost_farming_rpc::{FarmingRpc, FarmingRpcApiServer};
use bifrost_farming_rpc_runtime_api::FarmingRuntimeApi;
use bifrost_flexible_fee_rpc::{FeeRpcApiServer, FlexibleFeeRpc};
use bifrost_flexible_fee_rpc_runtime_api::FlexibleFeeRuntimeApi as FeeRuntimeApi;
+use bifrost_polkadot_runtime::Hash;
use bifrost_primitives::{AccountId, Balance, Block, CurrencyId, Nonce, ParaId, PoolId};
use bifrost_salp_rpc::{SalpRpc, SalpRpcApiServer};
use bifrost_salp_rpc_runtime_api::SalpRuntimeApi;
@@ -46,9 +47,11 @@ use bifrost_ve_minting_rpc::{VeMintingRpc, VeMintingRpcApiServer};
use bifrost_ve_minting_rpc_runtime_api::VeMintingRuntimeApi;
use bifrost_vtoken_minting_rpc::{VtokenMintingRpc, VtokenMintingRpcApiServer};
use bifrost_vtoken_minting_rpc_runtime_api::VtokenMintingRuntimeApi;
+use futures::channel::mpsc;
use lend_market_rpc::{LendMarket, LendMarketApiServer};
use lend_market_rpc_runtime_api::LendMarketApi;
use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
+use sc_consensus_manual_seal::rpc::{EngineCommand, ManualSeal, ManualSealApiServer};
use sc_rpc_api::DenyUnsafe;
use sc_transaction_pool_api::TransactionPool;
use sp_api::ProvideRuntimeApi;
@@ -60,6 +63,10 @@ use zenlink_protocol::AssetId;
use zenlink_protocol_rpc::{ZenlinkProtocol, ZenlinkProtocolApiServer};
use zenlink_protocol_runtime_api::ZenlinkProtocolApi as ZenlinkProtocolRuntimeApi;
use zenlink_stable_amm_rpc::{StableAmm, StableAmmApiServer};
+
+mod eth;
+pub use self::eth::{create_eth, EthDeps};
+
/// Full client dependencies.
pub struct FullDeps<C, P> {
/// The client instance to use.
@@ -70,6 +77,18 @@ pub struct FullDeps {
pub deny_unsafe: DenyUnsafe,
}
+/// Full client dependencies.
+pub struct FullDepsPolkadot<C, P> {
+ /// The client instance to use.
+ pub client: Arc<C>,
+ /// Transaction pool instance.
+ pub pool: Arc<P>,
+ /// Whether to deny unsafe calls
+ pub deny_unsafe: DenyUnsafe,
+ /// Manual seal command sink
+ pub command_sink: Option<mpsc::Sender<EngineCommand<Hash>>>,
+}
+
/// An IO handler that uses all Full RPC extensions.
pub type RpcExtension = jsonrpsee::RpcModule<()>;
@@ -118,7 +137,7 @@ where
/// RPC of bifrost-polkadot runtime.
pub fn create_full_polkadot<C, P>(
- deps: FullDeps<C, P>,
+ deps: FullDepsPolkadot<C, P>,
) -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>>
where
C: ProvideRuntimeApi<Block>
@@ -142,7 +161,7 @@ where
P: TransactionPool + Sync + Send + 'static,
{
let mut module = RpcExtension::new(());
- let FullDeps { client, pool, deny_unsafe } = deps;
+ let FullDepsPolkadot { client, pool, deny_unsafe, command_sink } = deps;
module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?;
module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
@@ -156,5 +175,13 @@ where
module.merge(LendMarket::new(client.clone()).into_rpc())?;
module.merge(VtokenMintingRpc::new(client).into_rpc())?;
+ if let Some(command_sink) = command_sink {
+ module.merge(
+ // We provide the rpc handler with the sending end of the channel to allow the rpc
+ // to send EngineCommands to the background block authorship task.
+ ManualSeal::new(command_sink).into_rpc(),
+ )?;
+ }
+
Ok(module)
}
diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml
index da5e8ae5b..e2470953b 100644
--- a/node/service/Cargo.toml
+++ b/node/service/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2021"
[dependencies]
# Substrate Client
+async-trait = { workspace = true }
sc-basic-authorship = { workspace = true }
sc-chain-spec = { workspace = true }
sc-client-api = { workspace = true }
@@ -57,7 +58,6 @@ pallet-transaction-payment-rpc = { workspace = true }
frame-system-rpc-runtime-api = { workspace = true }
substrate-prometheus-endpoint = { workspace = true }
substrate-frame-rpc-system = { workspace = true }
-try-runtime-cli = { workspace = true, optional = true }
pallet-transaction-payment-rpc-runtime-api = { workspace = true }
# Cumulus dependencies
@@ -71,6 +71,7 @@ cumulus-primitives-core = { workspace = true }
cumulus-client-service = { workspace = true }
cumulus-primitives-parachain-inherent = { workspace = true }
cumulus-relay-chain-interface = { workspace = true }
+cumulus-test-relay-sproof-builder = { workspace = true }
# Polkadot dependencies
polkadot-cli = { workspace = true }
@@ -81,10 +82,12 @@ xcm = { workspace = true }
bifrost-parachain-staking = { workspace = true }
# External Crates
+clap = { workspace = true, features = ["derive"] }
hex-literal = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
log = { workspace = true }
+flume = { workspace = true }
futures = { workspace = true, features = ["compat"] }
# Runtimes
@@ -95,13 +98,27 @@ bifrost-primitives = { workspace = true }
bifrost-rpc = { workspace = true }
# RPC related dependencies
-jsonrpsee = { workspace = true, features = [ "macros", "server" ] }
+jsonrpsee = { workspace = true, features = ["macros", "server"] }
# zenlink rpc
zenlink-protocol = { workspace = true }
zenlink-protocol-runtime-api = { workspace = true }
zenlink-stable-amm-runtime-api = { workspace = true }
+# Frontier
+fc-consensus = { workspace = true }
+fc-db = { workspace = true, features = ["default", "sql"] }
+fc-mapping-sync = { workspace = true, features = ["sql"] }
+fc-rpc = { workspace = true, features = ["txpool"] }
+fc-rpc-core = { workspace = true }
+fc-storage = { workspace = true }
+fp-storage = { workspace = true }
+fp-consensus = { workspace = true }
+fp-account = { workspace = true }
+fp-dynamic-fee = { workspace = true, features = ["default"] }
+fp-evm = { workspace = true, features = ["default"] }
+fp-rpc = { workspace = true, features = ["default"] }
+
# Bifrost rpc
bifrost-flexible-fee-rpc-runtime-api = { workspace = true }
bifrost-salp-rpc-runtime-api = { workspace = true }
@@ -137,5 +154,4 @@ try-runtime = [
fast-runtime = [
"bifrost-kusama-runtime/fast-runtime",
"bifrost-polkadot-runtime/fast-runtime",
-]
-
+]
\ No newline at end of file
diff --git a/node/service/src/chain_spec/bifrost_kusama.rs b/node/service/src/chain_spec/bifrost_kusama.rs
index a0561d014..bd7c17202 100644
--- a/node/service/src/chain_spec/bifrost_kusama.rs
+++ b/node/service/src/chain_spec/bifrost_kusama.rs
@@ -18,7 +18,7 @@
use bifrost_kusama_runtime::{
constants::currency::DOLLARS, AccountId, Balance, BalancesConfig, BlockNumber,
- DefaultBlocksPerRound, InflationInfo, Range, RuntimeGenesisConfig, SS58Prefix, VestingConfig,
+ DefaultBlocksPerRound, InflationInfo, Range, SS58Prefix, VestingConfig,
};
use bifrost_primitives::{CurrencyId, CurrencyId::*, TokenInfo, TokenSymbol::*};
use bifrost_runtime_common::AuraId;
@@ -42,7 +42,7 @@ use crate::chain_spec::{get_account_id_from_seed, get_from_seed, RelayExtensions
const DEFAULT_PROTOCOL_ID: &str = "bifrost";
/// Specialized `ChainSpec` for the bifrost runtime.
-pub type ChainSpec = sc_service::GenericChainSpec<RuntimeGenesisConfig, RelayExtensions>;
+pub type ChainSpec = sc_service::GenericChainSpec<RelayExtensions>;
#[allow(non_snake_case)]
pub fn ENDOWMENT() -> u128 {
@@ -219,7 +219,7 @@ pub fn local_testnet_config() -> ChainSpec {
ChainSpec::builder(
bifrost_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
- RelayExtensions { relay_chain: "kusama-local".into(), para_id: PARA_ID },
+ RelayExtensions { relay_chain: "kusama-local".into(), para_id: PARA_ID, evm_since: 1 },
)
.with_name("Bifrost Local Testnet")
.with_id("bifrost_local_testnet")
@@ -325,7 +325,7 @@ pub fn chainspec_config() -> ChainSpec {
ChainSpec::builder(
bifrost_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
- RelayExtensions { relay_chain: "kusama".into(), para_id: PARA_ID },
+ RelayExtensions { relay_chain: "kusama".into(), para_id: PARA_ID, evm_since: 1 },
)
.with_name("Bifrost")
.with_id("bifrost")
diff --git a/node/service/src/chain_spec/bifrost_polkadot.rs b/node/service/src/chain_spec/bifrost_polkadot.rs
index b500f9f54..ee486a4f3 100644
--- a/node/service/src/chain_spec/bifrost_polkadot.rs
+++ b/node/service/src/chain_spec/bifrost_polkadot.rs
@@ -18,21 +18,29 @@
use crate::chain_spec::{get_account_id_from_seed, get_from_seed, RelayExtensions};
use bifrost_polkadot_runtime::{
- constants::currency::DOLLARS, AccountId, Balance, BlockNumber, RuntimeGenesisConfig, SS58Prefix,
+ constants::currency::DOLLARS, AccountId, Balance, BlockNumber, SS58Prefix,
+};
+use bifrost_primitives::{
+ currency::{BNCS, DED, IBTC, INTR, PEN, PINK, USDC, WETH},
+ CurrencyId,
+ CurrencyId::*,
+ TokenInfo, TokenSymbol, ASTR, BNC, DOT, DOT_TOKEN_ID, DOT_U, FIL, GLMR, MANTA,
};
-use bifrost_primitives::{CurrencyId, CurrencyId::*, TokenInfo, TokenSymbol, DOT_TOKEN_ID};
use bifrost_runtime_common::AuraId;
use cumulus_primitives_core::ParaId;
+use fp_evm::GenesisAccount;
use frame_benchmarking::{account, whitelisted_caller};
use hex_literal::hex;
use sc_chain_spec::Properties;
use sc_service::ChainType;
-use sp_core::{crypto::UncheckedInto, sr25519};
+use sp_core::{crypto::UncheckedInto, sr25519, H160, U256};
+use sp_runtime::FixedU128;
+use std::{collections::BTreeMap, str::FromStr};
const DEFAULT_PROTOCOL_ID: &str = "bifrost_polkadot";
/// Specialized `ChainSpec` for the bifrost-polkadot runtime.
-pub type ChainSpec = sc_service::GenericChainSpec<RuntimeGenesisConfig, RelayExtensions>;
+pub type ChainSpec = sc_service::GenericChainSpec<RelayExtensions>;
#[allow(non_snake_case)]
pub fn ENDOWMENT() -> u128 {
@@ -67,6 +75,7 @@ pub fn bifrost_polkadot_genesis(
balances: Vec<(AccountId, Balance)>,
vestings: Vec<(AccountId, BlockNumber, BlockNumber, Balance)>,
id: ParaId,
+ tokens: Vec<(AccountId, CurrencyId, Balance)>,
council_membership: Vec<AccountId>,
technical_committee_membership: Vec<AccountId>,
salp_multisig_key: AccountId,
@@ -76,8 +85,9 @@ pub fn bifrost_polkadot_genesis(
Vec<(CurrencyId, u32, u32, u32)>,
),
oracle_membership: Vec<AccountId>,
+ evm_accounts: BTreeMap<H160, GenesisAccount>,
) -> serde_json::Value {
- serde_json::json!( {
+ serde_json::json!({
"balances": {
"balances": balances
},
@@ -103,9 +113,9 @@ pub fn bifrost_polkadot_genesis(
.cloned()
.map(|(acc, aura)| {
(
- acc.clone(), // account id
- acc, // validator id
- bifrost_polkadot_runtime::SessionKeys { aura }, // session keys
+ acc.clone(), // account id
+ acc, // validator id
+ bifrost_polkadot_runtime::opaque::SessionKeys { aura }, // session keys
)
})
.collect::<Vec<_>>(),
@@ -121,7 +131,19 @@ pub fn bifrost_polkadot_genesis(
"polkadotXcm": {
"safeXcmVersion": 3
},
- "salp": { "initialMultisigAccount": Some(salp_multisig_key) }
+ "salp": { "initialMultisigAccount": Some(salp_multisig_key) },
+ "tokens": { "balances": tokens },
+ "prices": {
+ "emergencyPrice": vec![
+ (DOT, FixedU128::from_inner(6_000_000_000_000_000_000u128)),
+ (WETH, FixedU128::from_inner(3000_000_000_000_000_000_000u128)),
+ (BNC, FixedU128::from_inner(250_000_000_000_000_000u128)),
+ ]
+ },
+ // EVM compatibility
+ "evmChainId": { "chainId": 996u64 },
+ "dynamicFee": { "minGasPrice": U256::from(560174200u64) },
+ "evm": { "accounts": evm_accounts },
})
}
@@ -137,24 +159,75 @@ pub fn local_testnet_config() -> ChainSpec {
account("bechmarking_account_1", 0, 0),
];
let balances = endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT())).collect();
+ let tokens = endowed_accounts
+ .iter()
+ .flat_map(|x| {
+ vec![
+ (x.clone(), DOT, ENDOWMENT() * 4_000_000),
+ (x.clone(), WETH, ENDOWMENT() * 4_000_000),
+ ]
+ })
+ .collect();
let council_membership = vec![get_account_id_from_seed::<sr25519::Public>("Alice")];
let technical_committee_membership = vec![get_account_id_from_seed::<sr25519::Public>("Alice")];
let oracle_membership = vec![get_account_id_from_seed::<sr25519::Public>("Alice")];
let salp_multisig: AccountId =
hex!["49daa32c7287890f38b7e1a8cd2961723d36d20baa0bf3b82e0c4bdda93b1c0a"].into();
let currency = vec![
- (Native(TokenSymbol::BNC), DOLLARS / 100, None),
(
- Token2(DOT_TOKEN_ID),
- DOLLARS / 1000_000,
- Some((String::from("Polkadot DOT"), String::from("DOT"), 10u8)),
+ BNC,
+ 10_000_000_000,
+ Some((String::from("Bifrost Native Coin"), String::from("BNC"), 12u8)),
+ ),
+ (DOT, 1_000_000, Some((String::from("Polkadot DOT"), String::from("DOT"), 10u8))),
+ (
+ GLMR,
+ 1_000_000_000_000,
+ Some((String::from("Moonbeam Native Token"), String::from("GLMR"), 18u8)),
+ ),
+ (DOT_U, 1_000, Some((String::from("Tether USD"), String::from("USDT"), 6u8))),
+ (ASTR, 10_000_000_000_000_000, Some((String::from("Astar"), String::from("ASTR"), 18u8))),
+ (
+ FIL,
+ 1_000_000_000_000,
+ Some((String::from("Filecoin Network Token"), String::from("FIL"), 18u8)),
+ ),
+ (USDC, 1_000, Some((String::from("USD Coin"), String::from("USDC"), 6u8))),
+ (IBTC, 100, Some((String::from("interBTC"), String::from("IBTC"), 8u8))),
+ (INTR, 10_000_000, Some((String::from("Interlay"), String::from("INTR"), 10u8))),
+ (
+ MANTA,
+ 10_000_000_000_000,
+ Some((String::from("Manta Network"), String::from("MANTA"), 18u8)),
+ ),
+ (
+ BNCS,
+ 10_000_000_000,
+ Some((String::from("bncs-20 inscription token BNCS"), String::from("BNCS"), 12u8)),
),
+ (PINK, 100_000_000, Some((String::from("PINK"), String::from("PINK"), 10u8))),
+ (DED, 1, Some((String::from("DED"), String::from("DED"), 10u8))),
+ (PEN, 100_000_000, Some((String::from("Pendulum"), String::from("PEN"), 12u8))),
+ (WETH, 100_000_000, Some((String::from("SnowBridge WETH"), String::from("SWETH"), 18u8))),
];
let vcurrency = vec![VSToken2(DOT_TOKEN_ID), VToken(TokenSymbol::BNC), VToken2(DOT_TOKEN_ID)];
+ let mut evm_accounts = BTreeMap::new();
+ evm_accounts.insert(
+ // H160 address of CI test runner account
+ H160::from_str("6be02d1d3665660d22ff9624b7be0551ee1ac91b")
+ .expect("internal H160 is valid; qed"),
+ fp_evm::GenesisAccount {
+ balance: U256::from(1_000_000_000_000_000_000_000_000u128),
+ code: Default::default(),
+ nonce: Default::default(),
+ storage: Default::default(),
+ },
+ );
+
ChainSpec::builder(
bifrost_polkadot_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
- RelayExtensions { relay_chain: "polkadot-local".into(), para_id: PARA_ID },
+ RelayExtensions { relay_chain: "polkadot-local".into(), para_id: PARA_ID, evm_since: 1 },
)
.with_name("Bifrost Polkadot Local Testnet")
.with_id("bifrost_polkadot_local_testnet")
@@ -170,11 +243,122 @@ pub fn local_testnet_config() -> ChainSpec {
balances,
vec![],
PARA_ID.into(),
+ tokens,
council_membership,
technical_committee_membership,
salp_multisig,
(currency, vcurrency, vec![]),
oracle_membership,
+ evm_accounts,
+ ))
+ .with_properties(bifrost_polkadot_properties())
+ .with_protocol_id(DEFAULT_PROTOCOL_ID)
+ .build()
+}
+
+pub fn dev_config() -> ChainSpec {
+ let endowed_accounts = vec![
+ get_account_id_from_seed::("Alice"),
+ get_account_id_from_seed::("Bob"),
+ get_account_id_from_seed::("Charlie"),
+ get_account_id_from_seed::("Dave"),
+ get_account_id_from_seed::("Eve"),
+ get_account_id_from_seed::("Ferdie"),
+ whitelisted_caller(), // Benchmarking whitelist_account
+ account("bechmarking_account_1", 0, 0),
+ ];
+ let balances = endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT())).collect();
+ let tokens = endowed_accounts
+ .iter()
+ .flat_map(|x| {
+ vec![
+ (x.clone(), DOT, ENDOWMENT() * 4_000_000),
+ (x.clone(), WETH, ENDOWMENT() * 4_000_000),
+ ]
+ })
+ .collect();
+ let council_membership = vec![get_account_id_from_seed::<sr25519::Public>("Alice")];
+ let technical_committee_membership = vec![get_account_id_from_seed::<sr25519::Public>("Alice")];
+ let oracle_membership = vec![get_account_id_from_seed::<sr25519::Public>("Alice")];
+ let salp_multisig: AccountId =
+ hex!["49daa32c7287890f38b7e1a8cd2961723d36d20baa0bf3b82e0c4bdda93b1c0a"].into();
+ let currency = vec![
+ (
+ BNC,
+ 10_000_000_000,
+ Some((String::from("Bifrost Native Coin"), String::from("BNC"), 12u8)),
+ ),
+ (DOT, 1_000_000, Some((String::from("Polkadot DOT"), String::from("DOT"), 10u8))),
+ (
+ GLMR,
+ 1_000_000_000_000,
+ Some((String::from("Moonbeam Native Token"), String::from("GLMR"), 18u8)),
+ ),
+ (DOT_U, 1_000, Some((String::from("Tether USD"), String::from("USDT"), 6u8))),
+ (ASTR, 10_000_000_000_000_000, Some((String::from("Astar"), String::from("ASTR"), 18u8))),
+ (
+ FIL,
+ 1_000_000_000_000,
+ Some((String::from("Filecoin Network Token"), String::from("FIL"), 18u8)),
+ ),
+ (USDC, 1_000, Some((String::from("USD Coin"), String::from("USDC"), 6u8))),
+ (IBTC, 100, Some((String::from("interBTC"), String::from("IBTC"), 8u8))),
+ (INTR, 10_000_000, Some((String::from("Interlay"), String::from("INTR"), 10u8))),
+ (
+ MANTA,
+ 10_000_000_000_000,
+ Some((String::from("Manta Network"), String::from("MANTA"), 18u8)),
+ ),
+ (
+ BNCS,
+ 10_000_000_000,
+ Some((String::from("bncs-20 inscription token BNCS"), String::from("BNCS"), 12u8)),
+ ),
+ (PINK, 100_000_000, Some((String::from("PINK"), String::from("PINK"), 10u8))),
+ (DED, 1, Some((String::from("DED"), String::from("DED"), 10u8))),
+ (PEN, 100_000_000, Some((String::from("Pendulum"), String::from("PEN"), 12u8))),
+ (WETH, 100_000_000, Some((String::from("SnowBridge WETH"), String::from("SWETH"), 18u8))),
+ ];
+ let vcurrency = vec![VSToken2(DOT_TOKEN_ID), VToken(TokenSymbol::BNC), VToken2(DOT_TOKEN_ID)];
+
+ let mut evm_accounts = BTreeMap::new();
+ evm_accounts.insert(
+ // H160 address of CI test runner account
+ H160::from_str("6be02d1d3665660d22ff9624b7be0551ee1ac91b")
+ .expect("internal H160 is valid; qed"),
+ fp_evm::GenesisAccount {
+ balance: U256::from(1_000_000_000_000_000_000_000_000u128),
+ code: Default::default(),
+ nonce: Default::default(),
+ storage: Default::default(),
+ },
+ );
+
+ ChainSpec::builder(
+ bifrost_polkadot_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
+ RelayExtensions { relay_chain: "polkadot".into(), para_id: PARA_ID, evm_since: 1 },
+ )
+ .with_name("Bifrost Polkadot Dev Testnet")
+ .with_id("dev")
+ .with_chain_type(ChainType::Development)
+ .with_genesis_config_patch(bifrost_polkadot_genesis(
+ vec![
+ (
+ get_account_id_from_seed::("Alice"),
+ get_from_seed::("Alice"),
+ ),
+ (get_account_id_from_seed::("Bob"), get_from_seed::("Bob")),
+ ],
+ balances,
+ vec![],
+ PARA_ID.into(),
+ tokens,
+ council_membership,
+ technical_committee_membership,
+ salp_multisig,
+ (currency, vcurrency, vec![]),
+ oracle_membership,
+ evm_accounts,
))
.with_properties(bifrost_polkadot_properties())
.with_protocol_id(DEFAULT_PROTOCOL_ID)
@@ -241,7 +425,7 @@ pub fn paseo_config() -> ChainSpec {
ChainSpec::builder(
bifrost_polkadot_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
- RelayExtensions { relay_chain: "paseo".into(), para_id: PARA_ID },
+ RelayExtensions { relay_chain: "paseo".into(), para_id: PARA_ID, evm_since: 1 },
)
.with_name("Bifrost Paseo")
.with_id("bifrost_paseo")
@@ -251,11 +435,13 @@ pub fn paseo_config() -> ChainSpec {
balances,
vec![],
PARA_ID.into(),
+ vec![],
council_membership,
technical_committee_membership,
salp_multisig,
(vec![], vec![], vec![]),
oracle_membership,
+ BTreeMap::new(),
))
.with_properties(bifrost_polkadot_properties())
.with_protocol_id(DEFAULT_PROTOCOL_ID)
@@ -294,7 +480,7 @@ pub fn chainspec_config() -> ChainSpec {
ChainSpec::builder(
bifrost_polkadot_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"),
- RelayExtensions { relay_chain: "polkadot".into(), para_id: PARA_ID },
+ RelayExtensions { relay_chain: "polkadot".into(), para_id: PARA_ID, evm_since: 1 },
)
.with_name("Bifrost Polkadot")
.with_id("bifrost_polkadot")
@@ -306,9 +492,11 @@ pub fn chainspec_config() -> ChainSpec {
PARA_ID.into(),
vec![],
vec![],
+ vec![],
salp_multisig,
(vec![], vec![], vec![]),
vec![],
+ BTreeMap::new(),
))
.with_properties(bifrost_polkadot_properties())
.with_protocol_id(DEFAULT_PROTOCOL_ID)
diff --git a/node/service/src/chain_spec/mod.rs b/node/service/src/chain_spec/mod.rs
index b5a8e00f2..e1c180ce5 100644
--- a/node/service/src/chain_spec/mod.rs
+++ b/node/service/src/chain_spec/mod.rs
@@ -21,6 +21,7 @@ pub mod bifrost_kusama;
#[cfg(any(feature = "with-bifrost-polkadot-runtime", feature = "with-bifrost-runtime"))]
pub mod bifrost_polkadot;
+use bifrost_primitives::BlockNumber;
pub use bifrost_primitives::{AccountId, Block, Signature};
use sc_chain_spec::ChainSpecExtension;
use serde::{Deserialize, Serialize};
@@ -42,11 +43,14 @@ pub struct RelayExtensions {
pub relay_chain: String,
/// The id of the Parachain.
pub para_id: u32,
+ /// EVM compatible starting block number
+ pub evm_since: BlockNumber,
}
impl RelayExtensions {
/// Try to get the extension from the given `ChainSpec`.
- pub fn try_get(chain_spec: &dyn sc_service::ChainSpec) -> Option<&Self> {
+ #[allow(clippy::borrowed_box)]
+ pub fn try_get(chain_spec: &Box<dyn sc_service::ChainSpec>) -> Option<&Self> {
sc_chain_spec::get_extension(chain_spec.extensions())
}
}
diff --git a/node/service/src/collator_kusama.rs b/node/service/src/collator_kusama.rs
index 106cbae10..2d2e41cb3 100644
--- a/node/service/src/collator_kusama.rs
+++ b/node/service/src/collator_kusama.rs
@@ -31,6 +31,7 @@ use cumulus_client_consensus_aura::collators::basic::{
use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport;
use cumulus_client_consensus_proposer::Proposer;
+use crate::eth::EthConfiguration;
use bifrost_primitives::Block;
use cumulus_client_service::{
build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks,
@@ -42,7 +43,7 @@ use polkadot_primitives::CollatorPair;
use sc_client_api::backend::Backend;
use sc_consensus::{ImportQueue, LongestChain};
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
-use sc_network::NetworkBlock;
+use sc_network::{service::traits::NetworkBackend, NetworkBlock};
use sc_network_sync::SyncingService;
use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
@@ -51,12 +52,13 @@ use sp_keystore::KeystorePtr;
use substrate_prometheus_endpoint::Registry;
#[cfg(not(feature = "runtime-benchmarks"))]
-type HostFunctions = sp_io::SubstrateHostFunctions;
+type HostFunctions = cumulus_client_service::ParachainHostFunctions;
#[cfg(feature = "runtime-benchmarks")]
-type HostFunctions =
- (sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions);
-
+type HostFunctions = (
+ cumulus_client_service::ParachainHostFunctions,
+ frame_benchmarking::benchmarking::HostFunctions,
+);
pub type FullBackend = TFullBackend<Block>;
pub type FullClient = TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;
pub type MaybeFullSelectChain = Option<LongestChain<FullBackend, Block>>;
@@ -100,10 +102,11 @@ pub fn new_partial(
.build();
let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::<Block, RuntimeApi, _>(
+ sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>(
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
+ true,
)?;
let client = Arc::new(client);
@@ -210,8 +213,6 @@ fn start_consensus(
overseer_handle: OverseerHandle,
announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
) -> Result<(), sc_service::Error> {
- let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
-
let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
task_manager.spawn_handle(),
client.clone(),
@@ -239,7 +240,6 @@ fn start_consensus(
collator_key,
para_id,
overseer_handle,
- slot_duration,
relay_chain_slot_duration,
proposer,
collator_service,
@@ -263,14 +263,18 @@ fn start_consensus(
/// This is the actual implementation that is abstract over the executor and the
/// runtime api.
#[sc_tracing::logging::prefix_logs_with("Parachain🌈")]
-async fn start_node_impl(
+async fn start_node_impl<Net>(
parachain_config: Configuration,
polkadot_config: Configuration,
+ _eth_config: EthConfiguration,
collator_options: CollatorOptions,
sybil_resistance_level: CollatorSybilResistance,
para_id: ParaId,
hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<FullClient>)> {
+) -> sc_service::error::Result<(TaskManager, Arc<FullClient>)>
+where
+ Net: NetworkBackend<Block, Hash>,
+{
let parachain_config = prepare_node_config(parachain_config);
let params = new_partial(&parachain_config, false)?;
@@ -295,7 +299,8 @@ async fn start_node_impl(
let prometheus_registry = parachain_config.prometheus_registry().cloned();
let transaction_pool = params.transaction_pool.clone();
let import_queue_service = params.import_queue.service();
- let net_config = sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);
+ let net_config =
+ sc_network::config::FullNetworkConfiguration::<_, _, Net>::new(&parachain_config.network);
let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
build_network(BuildNetworkParams {
parachain_config: &parachain_config,
@@ -323,7 +328,7 @@ async fn start_node_impl(
transaction_pool: Some(OffchainTransactionPoolFactory::new(
transaction_pool.clone(),
)),
- network_provider: network.clone(),
+ network_provider: Arc::new(network.clone()),
is_validator: parachain_config.role.is_authority(),
enable_http_requests: false,
custom_extensions: move |_| vec![],
@@ -428,16 +433,18 @@ async fn start_node_impl(
}
/// Start a normal parachain node.
-pub async fn start_node(
+pub async fn start_node<Net: NetworkBackend<Block, Hash>>(
parachain_config: Configuration,
polkadot_config: Configuration,
+ eth_config: EthConfiguration,
collator_options: CollatorOptions,
para_id: ParaId,
hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<FullClient>)> {
- start_node_impl(
+ start_node_impl::<Net>(
parachain_config,
polkadot_config,
+ eth_config,
collator_options,
CollatorSybilResistance::Resistant,
para_id,
diff --git a/node/service/src/collator_polkadot.rs b/node/service/src/collator_polkadot.rs
index d1255f0bb..5bbe2e7b5 100644
--- a/node/service/src/collator_polkadot.rs
+++ b/node/service/src/collator_polkadot.rs
@@ -17,11 +17,15 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Service implementation. Specialized wrapper over substrate service.
-use std::{sync::Arc, time::Duration};
+use std::{
+ collections::BTreeMap,
+ sync::{Arc, Mutex},
+ time::Duration,
+};
#[cfg(any(feature = "with-bifrost-polkadot-runtime", feature = "with-bifrost-runtime"))]
pub use bifrost_polkadot_runtime;
-use bifrost_polkadot_runtime::RuntimeApi;
+use bifrost_polkadot_runtime::{RuntimeApi, TransactionConverter};
use cumulus_client_cli::CollatorOptions;
use cumulus_client_collator::service::CollatorService;
use cumulus_client_consensus_aura::collators::basic::{
@@ -31,6 +35,7 @@ use cumulus_client_consensus_aura::collators::basic::{
use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport;
use cumulus_client_consensus_proposer::Proposer;
+use crate::{chain_spec, eth, IdentifyVariant};
use bifrost_primitives::Block;
use cumulus_client_service::{
build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks,
@@ -38,11 +43,13 @@ use cumulus_client_service::{
};
use cumulus_primitives_core::{relay_chain::Hash, ParaId};
use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
+use fc_rpc_core::types::{FeeHistoryCache, FilterPool};
+use fc_storage::StorageOverrideHandler;
use polkadot_primitives::CollatorPair;
use sc_client_api::backend::Backend;
use sc_consensus::{ImportQueue, LongestChain};
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
-use sc_network::NetworkBlock;
+use sc_network::{service::traits::NetworkBackend, NetworkBlock};
use sc_network_sync::SyncingService;
use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
@@ -50,12 +57,16 @@ use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_keystore::KeystorePtr;
use substrate_prometheus_endpoint::Registry;
+use crate::eth::{EthConfiguration, FrontierBackend};
+
#[cfg(not(feature = "runtime-benchmarks"))]
-type HostFunctions = sp_io::SubstrateHostFunctions;
+type HostFunctions = cumulus_client_service::ParachainHostFunctions;
#[cfg(feature = "runtime-benchmarks")]
-type HostFunctions =
- (sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions);
+type HostFunctions = (
+ cumulus_client_service::ParachainHostFunctions,
+ frame_benchmarking::benchmarking::HostFunctions,
+);
pub type FullBackend = TFullBackend<Block>;
pub type FullClient = TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;
@@ -72,7 +83,14 @@ pub fn new_partial(
MaybeFullSelectChain,
sc_consensus::import_queue::BasicQueue<Block>,
sc_transaction_pool::FullPool<Block, FullClient>,
- (ParachainBlockImport, Option<Telemetry>, Option<TelemetryWorkerHandle>),
+ (
+ eth::BlockImport<Block, ParachainBlockImport, FullClient>,
+ Option<Telemetry>,
+ Option<TelemetryWorkerHandle>,
+ Arc<FrontierBackend<Block, FullClient>>,
+ FilterPool,
+ FeeHistoryCache,
+ ),
>,
sc_service::Error,
> {
@@ -100,10 +118,11 @@ pub fn new_partial(
.build();
let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::<Block, RuntimeApi, _>(
+ sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>(
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
+ true,
)?;
let client = Arc::new(client);
@@ -126,7 +145,21 @@ pub fn new_partial(
let select_chain = if dev { Some(LongestChain::new(backend.clone())) } else { None };
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+ let frontier_backend = Arc::new(FrontierBackend::open(
+ Arc::clone(&client),
+ &config.database,
+ &eth::db_config_dir(config),
+ )?);
+
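+ // Read the first EVM-compatible block from the chain-spec extensions, defaulting to 1.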
+ let evm_since = chain_spec::RelayExtensions::try_get(&config.chain_spec)
+ .map(|e| e.evm_since)
+ .unwrap_or(1);
+ let block_import = eth::BlockImport::new(
+ ParachainBlockImport::new(client.clone(), backend.clone()),
+ client.clone(),
+ frontier_backend.clone(),
+ evm_since,
+ );
let import_queue = if dev {
sc_consensus_manual_seal::import_queue(
@@ -144,6 +177,9 @@ pub fn new_partial(
)?
};
+ let filter_pool: FilterPool = Arc::new(Mutex::new(BTreeMap::new()));
+ let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
+
Ok(PartialComponents {
backend,
client,
@@ -152,14 +188,21 @@ pub fn new_partial(
task_manager,
transaction_pool,
select_chain,
- other: (block_import, telemetry, telemetry_worker_handle),
+ other: (
+ block_import,
+ telemetry,
+ telemetry_worker_handle,
+ frontier_backend,
+ filter_pool,
+ fee_history_cache,
+ ),
})
}
/// Build the import queue for the parachain runtime.
fn build_import_queue(
client: Arc<FullClient>,
- block_import: ParachainBlockImport,
+ block_import: eth::BlockImport<Block, ParachainBlockImport, FullClient>,
config: &Configuration,
telemetry: Option<TelemetryHandle>,
task_manager: &TaskManager,
@@ -196,7 +239,7 @@ fn build_import_queue(
fn start_consensus(
client: Arc<FullClient>,
- block_import: ParachainBlockImport,
+ block_import: eth::BlockImport<Block, ParachainBlockImport, FullClient>,
prometheus_registry: Option<&Registry>,
telemetry: Option<TelemetryHandle>,
task_manager: &TaskManager,
@@ -210,8 +253,6 @@ fn start_consensus(
overseer_handle: OverseerHandle,
announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
) -> Result<(), sc_service::Error> {
- let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
-
let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
task_manager.spawn_handle(),
client.clone(),
@@ -239,7 +280,6 @@ fn start_consensus(
collator_key,
para_id,
overseer_handle,
- slot_duration,
relay_chain_slot_duration,
proposer,
collator_service,
@@ -263,18 +303,29 @@ fn start_consensus(
/// This is the actual implementation that is abstract over the executor and the
/// runtime api.
#[sc_tracing::logging::prefix_logs_with("Parachain🌈")]
-async fn start_node_impl(
+async fn start_node_impl<Net>(
parachain_config: Configuration,
polkadot_config: Configuration,
+ eth_config: EthConfiguration,
collator_options: CollatorOptions,
sybil_resistance_level: CollatorSybilResistance,
para_id: ParaId,
hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<FullClient>)> {
+) -> sc_service::error::Result<(TaskManager, Arc<FullClient>)>
+where
+ Net: NetworkBackend<Block, Hash>,
+{
let parachain_config = prepare_node_config(parachain_config);
let params = new_partial(&parachain_config, false)?;
- let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
+ let (
+ block_import,
+ mut telemetry,
+ telemetry_worker_handle,
+ frontier_backend,
+ filter_pool,
+ fee_history_cache,
+ ) = params.other;
let client = params.client.clone();
let backend = params.backend.clone();
@@ -295,7 +346,8 @@ async fn start_node_impl(
let prometheus_registry = parachain_config.prometheus_registry().cloned();
let transaction_pool = params.transaction_pool.clone();
let import_queue_service = params.import_queue.service();
- let net_config = sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);
+ let net_config =
+ sc_network::config::FullNetworkConfiguration::<_, _, Net>::new(&parachain_config.network);
let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
build_network(BuildNetworkParams {
parachain_config: &parachain_config,
@@ -323,7 +375,7 @@ async fn start_node_impl(
transaction_pool: Some(OffchainTransactionPoolFactory::new(
transaction_pool.clone(),
)),
- network_provider: network.clone(),
+ network_provider: Arc::new(network.clone()),
is_validator: parachain_config.role.is_authority(),
enable_http_requests: false,
custom_extensions: move |_| vec![],
@@ -333,17 +385,73 @@ async fn start_node_impl(
);
}
+ let storage_override = Arc::new(StorageOverrideHandler::new(client.clone()));
+ let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
+ task_manager.spawn_handle(),
+ storage_override.clone(),
+ eth_config.eth_log_block_cache,
+ eth_config.eth_statuses_cache,
+ prometheus_registry.clone(),
+ ));
+
+ // Sinks for pubsub notifications.
+ // Every time a new subscription is created, a new mpsc channel is added to the sink pool.
+ // The MappingSyncWorker sends through the channel on block import and the subscription emits a
+ // notification to the subscriber on receiving a message through this channel.
+ // This way we avoid race conditions when using native substrate block import notification
+ // stream.
+ let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
+ fc_mapping_sync::EthereumBlockNotification<Block>,
+ > = Default::default();
+ let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);
+
let rpc_builder = {
let client = client.clone();
+ let is_authority = parachain_config.role.is_authority();
let transaction_pool = transaction_pool.clone();
-
- Box::new(move |deny_unsafe, _| {
- let deps = crate::rpc::FullDeps {
+ let network = network.clone();
+ let sync_service = sync_service.clone();
+ let frontier_backend = frontier_backend.clone();
+ let fee_history_cache = fee_history_cache.clone();
+ let filter_pool = filter_pool.clone();
+ let storage_override = storage_override.clone();
+ let pubsub_notification_sinks = pubsub_notification_sinks.clone();
+
+ Box::new(move |deny_unsafe, subscription_task_executor| {
+ let deps = crate::rpc::FullDepsPolkadot {
client: client.clone(),
pool: transaction_pool.clone(),
deny_unsafe,
+ command_sink: None,
};
- crate::rpc::create_full_polkadot(deps).map_err(Into::into)
+ let module = crate::rpc::create_full_polkadot(deps)?;
+
+ let eth_deps = crate::rpc::EthDeps {
+ client: client.clone(),
+ pool: transaction_pool.clone(),
+ graph: transaction_pool.pool().clone(),
+ converter: Some(TransactionConverter),
+ is_authority,
+ enable_dev_signer: eth_config.enable_dev_signer,
+ network: network.clone(),
+ sync_service: sync_service.clone(),
+ frontier_backend: frontier_backend.clone(),
+ storage_override: storage_override.clone(),
+ block_data_cache: block_data_cache.clone(),
+ filter_pool: filter_pool.clone(),
+ max_past_logs: eth_config.max_past_logs,
+ fee_history_cache: fee_history_cache.clone(),
+ fee_history_cache_limit: eth_config.fee_history_limit,
+ execute_gas_limit_multiplier: eth_config.execute_gas_limit_multiplier,
+ };
+
+ crate::rpc::create_eth(
+ module,
+ eth_deps,
+ subscription_task_executor,
+ pubsub_notification_sinks.clone(),
+ )
+ .map_err(Into::into)
})
};
@@ -354,7 +462,7 @@ async fn start_node_impl(
task_manager: &mut task_manager,
config: parachain_config,
keystore: params.keystore_container.keystore(),
- backend,
+ backend: backend.clone(),
network: network.clone(),
sync_service: sync_service.clone(),
system_rpc_tx,
@@ -362,6 +470,19 @@ async fn start_node_impl(
telemetry: telemetry.as_mut(),
})?;
+ eth::spawn_frontier_tasks(
+ &task_manager,
+ client.clone(),
+ backend,
+ frontier_backend,
+ filter_pool,
+ storage_override,
+ fee_history_cache,
+ eth_config.fee_history_limit,
+ sync_service.clone(),
+ pubsub_notification_sinks,
+ );
+
if let Some(hwbench) = hwbench {
sc_sysinfo::print_hwbench(&hwbench);
@@ -428,20 +549,26 @@ async fn start_node_impl(
}
/// Start a normal parachain node.
-pub async fn start_node(
+pub async fn start_node<Net: NetworkBackend<Block, Hash>>(
parachain_config: Configuration,
polkadot_config: Configuration,
+ eth_config: EthConfiguration,
collator_options: CollatorOptions,
para_id: ParaId,
hwbench: Option<sc_sysinfo::HwBench>,
) -> sc_service::error::Result<(TaskManager, Arc<FullClient>)> {
- start_node_impl(
- parachain_config,
- polkadot_config,
- collator_options,
- CollatorSybilResistance::Resistant,
- para_id,
- hwbench,
- )
- .await
+ if parachain_config.chain_spec.is_dev() {
+ crate::dev::start_node::<Net>(parachain_config, eth_config).await
+ } else {
+ start_node_impl::<Net>(
+ parachain_config,
+ polkadot_config,
+ eth_config,
+ collator_options,
+ CollatorSybilResistance::Resistant,
+ para_id,
+ hwbench,
+ )
+ .await
+ }
}
diff --git a/node/service/src/dev.rs b/node/service/src/dev.rs
index c4605d55f..69ff11c62 100644
--- a/node/service/src/dev.rs
+++ b/node/service/src/dev.rs
@@ -16,158 +16,260 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
-use std::sync::Arc;
+use std::{cell::RefCell, sync::Arc};
-use cumulus_primitives_parachain_inherent::MockValidationDataInherentDataProvider;
-use futures::StreamExt;
-use sc_executor::NativeElseWasmExecutor;
-use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
+use cumulus_primitives_core::relay_chain::Hash;
+use fc_storage::StorageOverrideHandler;
+use jsonrpsee::core::async_trait;
+use sc_client_api::Backend;
+use sc_network::NetworkBackend;
+use sc_service::{Configuration, TaskManager};
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
+
+use bifrost_polkadot_runtime::{constants::time::SLOT_DURATION, TransactionConverter};
+
+use crate::{
+ collator_polkadot::FullClient,
+ eth::{spawn_frontier_tasks, EthConfiguration},
+};
pub type Block = bifrost_primitives::Block;
-pub type Executor = crate::collator_kusama::BifrostExecutor;
-pub type RuntimeApi = crate::collator_kusama::bifrost_kusama_runtime::RuntimeApi;
-pub type FullClient =
- sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>;
+pub type RuntimeApi = bifrost_polkadot_runtime::RuntimeApi;
+
pub type FullBackend = sc_service::TFullBackend<Block>;
pub type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
-pub fn default_mock_parachain_inherent_data_provider() -> MockValidationDataInherentDataProvider {
- MockValidationDataInherentDataProvider {
- current_para_block: 0,
- relay_offset: 1000,
- relay_blocks_per_para_block: 2,
- xcm_config: Default::default(),
- raw_downward_messages: vec![],
- raw_horizontal_messages: vec![],
+thread_local!(static TIMESTAMP: RefCell<u64> = const { RefCell::new(0) });
+
+/// Provide a mock duration starting at 0 in millisecond for timestamp inherent.
+/// Each call will increment timestamp by slot_duration making Aura think time has passed.
+struct MockTimestampInherentDataProvider;
+
+#[async_trait]
+impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider {
+ async fn provide_inherent_data(
+ &self,
+ inherent_data: &mut sp_inherents::InherentData,
+ ) -> Result<(), sp_inherents::Error> {
+ TIMESTAMP.with(|x| {
+ *x.borrow_mut() += SLOT_DURATION;
+ inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow())
+ })
+ }
+
+ async fn try_handle_error(
+ &self,
+ _identifier: &sp_inherents::InherentIdentifier,
+ _error: &[u8],
+ ) -> Option<Result<(), sp_inherents::Error>> {
+ // The pallet never reports error.
+ None
}
}
/// Builds a new development service. This service uses manual seal, and mocks
/// the parachain inherent.
-pub fn start_node(config: Configuration) -> Result<TaskManager, ServiceError> {
- let sc_service::PartialComponents {
- client,
- backend,
- mut task_manager,
- import_queue,
- keystore_container,
- select_chain: maybe_select_chain,
- transaction_pool,
- other: (_, _),
- } = crate::collator_kusama::new_partial::<
- bifrost_kusama_runtime::RuntimeApi,
- crate::collator_kusama::BifrostExecutor,
- >(&config, true)?;
-
- let (network, system_rpc_tx, network_starter) =
+/// Before calling this function, you must set `OnTimestampSet` in the runtime to `()`.
+pub async fn start_node<Net>(
+ parachain_config: Configuration,
+ eth_config: EthConfiguration,
+) -> sc_service::error::Result<(TaskManager, Arc<FullClient>)>
+where
+ Net: NetworkBackend<Block, Hash>,
+{
+ let params = crate::collator_polkadot::new_partial(&parachain_config, true)?;
+ let (
+ _block_import,
+ mut telemetry,
+ _telemetry_worker_handle,
+ frontier_backend,
+ filter_pool,
+ fee_history_cache,
+ ) = params.other;
+
+ let client = params.client.clone();
+ let backend = params.backend.clone();
+ let mut task_manager = params.task_manager;
+
+ let transaction_pool = params.transaction_pool.clone();
+ let net_config =
+ sc_network::config::FullNetworkConfiguration::<_, _, Net>::new(&parachain_config.network);
+ let metrics = Net::register_notification_metrics(
+ parachain_config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
+ );
+
+ let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
sc_service::build_network(sc_service::BuildNetworkParams {
- config: &config,
+ config: &parachain_config,
+ net_config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
- import_queue,
+ import_queue: params.import_queue,
block_announce_validator_builder: None,
- warp_sync: None,
+ warp_sync_params: None,
+ block_relay: None,
+ metrics,
})?;
- if config.offchain_worker.enabled {
- let offchain_workers = Arc::new(sc_offchain::OffchainWorkers::new_with_options(
- client.clone(),
- sc_offchain::OffchainWorkerOptions { enable_http_requests: false },
- ));
+ let prometheus_registry = parachain_config.prometheus_registry().cloned();
+
+ if parachain_config.offchain_worker.enabled {
+ use futures::FutureExt;
- // Start the offchain workers to have
+ let backend_ofc = backend.clone();
task_manager.spawn_handle().spawn(
- "offchain-notifications",
- None,
- sc_offchain::notification_future(
- config.role.is_authority(),
- client.clone(),
- offchain_workers,
- task_manager.spawn_handle(),
- network.clone(),
- ),
+ "offchain-workers-runner",
+ "offchain-work",
+ sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
+ runtime_api_provider: client.clone(),
+ keystore: Some(params.keystore_container.keystore()),
+ offchain_db: backend_ofc.offchain_storage(),
+ transaction_pool: Some(OffchainTransactionPoolFactory::new(
+ transaction_pool.clone(),
+ )),
+ network_provider: Arc::new(network.clone()),
+ is_validator: parachain_config.role.is_authority(),
+ enable_http_requests: false,
+ custom_extensions: move |_| vec![],
+ })
+ .run(client.clone(), task_manager.spawn_handle())
+ .boxed(),
);
}
- let prometheus_registry = config.prometheus_registry().cloned();
- let role = config.role.clone();
-
- let select_chain = maybe_select_chain
- .expect("In dev mode, `new_partial` will return some `select_chain`; qed");
+ let select_chain = params
+ .select_chain
+ .expect("In `dev` mode, `new_partial` will return some `select_chain`; qed");
- if role.is_authority() {
- let proposer_factory = sc_basic_authorship::ProposerFactory::new(
- task_manager.spawn_handle(),
- client.clone(),
- transaction_pool.clone(),
- prometheus_registry.as_ref(),
- None,
- );
+ let proposer_factory = sc_basic_authorship::ProposerFactory::new(
+ task_manager.spawn_handle(),
+ client.clone(),
+ transaction_pool.clone(),
+ None,
+ None,
+ );
- let pool = transaction_pool.pool().clone();
- let commands_stream = pool.validated_pool().import_notification_stream().map(|_| {
- sc_consensus_manual_seal::rpc::EngineCommand::SealNewBlock {
- create_empty: false,
- finalize: true,
- parent_hash: None,
- sender: None,
- }
+ // Channel for the rpc handler to communicate with the authorship task.
+ let (command_sink, commands_stream) = futures::channel::mpsc::channel(1024);
+ let authorship_future =
+ sc_consensus_manual_seal::run_manual_seal(sc_consensus_manual_seal::ManualSealParams {
+ block_import: client.clone(),
+ env: proposer_factory,
+ client: client.clone(),
+ pool: transaction_pool.clone(),
+ commands_stream,
+ select_chain,
+ consensus_data_provider: None,
+ create_inherent_data_providers: move |_, ()| async move {
+ Ok(sp_timestamp::InherentDataProvider::from_system_time())
+ },
});
+ // we spawn the future on a background thread managed by service.
+ task_manager.spawn_essential_handle().spawn_blocking(
+ "manual-seal",
+ Some("block-authoring"),
+ authorship_future,
+ );
- let authorship_future =
- sc_consensus_manual_seal::run_manual_seal(sc_consensus_manual_seal::ManualSealParams {
- block_import: client.clone(),
- env: proposer_factory,
- client: client.clone(),
- pool: transaction_pool.clone(),
- commands_stream,
- select_chain,
- consensus_data_provider: None,
- create_inherent_data_providers: |_, _| async {
- Ok((
- sp_timestamp::InherentDataProvider::from_system_time(),
- default_mock_parachain_inherent_data_provider(),
- ))
- },
- });
- // we spawn the future on a background thread managed by service.
- task_manager.spawn_essential_handle().spawn_blocking(
- "instant-seal",
- Some("block-authoring"),
- authorship_future,
- );
- }
+ let storage_override = Arc::new(StorageOverrideHandler::new(client.clone()));
+ let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
+ task_manager.spawn_handle(),
+ storage_override.clone(),
+ eth_config.eth_log_block_cache,
+ eth_config.eth_statuses_cache,
+ prometheus_registry.clone(),
+ ));
+
+ // Sinks for pubsub notifications.
+ // Every time a new subscription is created, a new mpsc channel is added to the sink pool.
+ // The MappingSyncWorker sends through the channel on block import and the subscription emits a
+ // notification to the subscriber on receiving a message through this channel.
+ // This way we avoid race conditions when using native substrate block import notification
+ // stream.
+ let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
+ fc_mapping_sync::EthereumBlockNotification<Block>,
+ > = Default::default();
+ let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);
- let rpc_extensions_builder = {
+ let rpc_builder = {
let client = client.clone();
+ let is_authority = parachain_config.role.is_authority();
let transaction_pool = transaction_pool.clone();
- Box::new(move |deny_unsafe, _| {
- let deps = crate::rpc::FullDeps {
+ let network = network.clone();
+ let sync_service = sync_service.clone();
+ let frontier_backend = frontier_backend.clone();
+ let fee_history_cache = fee_history_cache.clone();
+ let filter_pool = filter_pool.clone();
+ let storage_override = storage_override.clone();
+ let pubsub_notification_sinks = pubsub_notification_sinks.clone();
+
+ Box::new(move |deny_unsafe, subscription_task_executor| {
+ let deps = crate::rpc::FullDepsPolkadot {
client: client.clone(),
pool: transaction_pool.clone(),
deny_unsafe,
+ command_sink: Some(command_sink.clone()),
};
+ let module = crate::rpc::create_full_polkadot(deps)?;
- Ok(crate::rpc::create_full_rpc(deps))
+ let eth_deps = crate::rpc::EthDeps {
+ client: client.clone(),
+ pool: transaction_pool.clone(),
+ graph: transaction_pool.pool().clone(),
+ converter: Some(TransactionConverter),
+ is_authority,
+ enable_dev_signer: eth_config.enable_dev_signer,
+ network: network.clone(),
+ sync_service: sync_service.clone(),
+ frontier_backend: frontier_backend.clone(),
+ storage_override: storage_override.clone(),
+ block_data_cache: block_data_cache.clone(),
+ filter_pool: filter_pool.clone(),
+ max_past_logs: eth_config.max_past_logs,
+ fee_history_cache: fee_history_cache.clone(),
+ fee_history_cache_limit: eth_config.fee_history_limit,
+ execute_gas_limit_multiplier: eth_config.execute_gas_limit_multiplier,
+ };
+
+ crate::rpc::create_eth(
+ module,
+ eth_deps,
+ subscription_task_executor,
+ pubsub_notification_sinks.clone(),
+ )
+ .map_err(Into::into)
})
};
sc_service::spawn_tasks(sc_service::SpawnTasksParams {
- network,
- client,
- keystore: keystore_container.sync_keystore(),
+ rpc_builder,
+ client: client.clone(),
+ transaction_pool: transaction_pool.clone(),
task_manager: &mut task_manager,
- transaction_pool,
- rpc_extensions_builder,
- backend,
+ config: parachain_config,
+ keystore: params.keystore_container.keystore(),
+ backend: backend.clone(),
+ network: network.clone(),
+ sync_service: sync_service.clone(),
system_rpc_tx,
- config,
- telemetry: None,
+ tx_handler_controller,
+ telemetry: telemetry.as_mut(),
})?;
+ spawn_frontier_tasks(
+ &task_manager,
+ client.clone(),
+ backend,
+ frontier_backend,
+ filter_pool,
+ storage_override,
+ fee_history_cache,
+ eth_config.fee_history_limit,
+ sync_service.clone(),
+ pubsub_notification_sinks,
+ );
- log::info!("Development Service Ready");
+ start_network.start_network();
- network_starter.start_network();
- Ok(task_manager)
+ Ok((task_manager, client))
}
diff --git a/node/service/src/eth.rs b/node/service/src/eth.rs
new file mode 100644
index 000000000..ed19208e0
--- /dev/null
+++ b/node/service/src/eth.rs
@@ -0,0 +1,212 @@
+// This file is part of Bifrost.
+
+// Copyright (C) Liebi Technologies PTE. LTD.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use std::{marker::PhantomData, path::PathBuf, sync::Arc, time::Duration};
+
+use crate::collator_polkadot::{FullBackend, FullClient};
+use bifrost_polkadot_runtime::opaque::Block;
+use cumulus_client_consensus_common::ParachainBlockImportMarker;
+use cumulus_primitives_core::BlockT;
+use fc_consensus::Error;
+pub use fc_consensus::FrontierBlockImport;
+pub use fc_db::kv::Backend as FrontierBackend;
+use fc_mapping_sync::{kv::MappingSyncWorker, SyncStrategy};
+use fc_rpc::EthTask;
+pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool};
+use fc_storage::StorageOverride;
+use fp_consensus::ensure_log;
+use fp_rpc::EthereumRuntimeRPCApi;
+use futures::{future, prelude::*};
+use polkadot_service::HeaderT;
+use sc_client_api::{AuxStore, BlockOf, BlockchainEvents};
+use sc_consensus::{
+ BlockCheckParams, BlockImport as BlockImportT, BlockImportParams, ImportResult,
+};
+use sc_network_sync::SyncingService;
+use sc_service::{Configuration, TaskManager};
+use sp_api::ProvideRuntimeApi;
+use sp_block_builder::BlockBuilder as BlockBuilderApi;
+use sp_blockchain::HeaderBackend;
+use sp_consensus::Error as ConsensusError;
+
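+/// Directory holding the Frontier database, resolved under the node's config path.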
+pub fn db_config_dir(config: &Configuration) -> PathBuf {
+ config.base_path.config_dir(config.chain_spec.id())
+}
+
+/// The ethereum-compatibility configuration used to run a node.
+#[derive(Clone, Debug, clap::Parser)]
+pub struct EthConfiguration {
+ /// Maximum number of logs in a query.
+ #[arg(long, default_value = "10000")]
+ pub max_past_logs: u32,
+
+ /// Maximum fee history cache size.
+ #[arg(long, default_value = "2048")]
+ pub fee_history_limit: u64,
+
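+ /// Enable the development Ethereum signer (signs with well-known dev keys).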
+ #[arg(long)]
+ pub enable_dev_signer: bool,
+
+ /// The dynamic-fee pallet target gas price set by block author
+ #[arg(long, default_value = "1")]
+ pub target_gas_price: u64,
+
+ /// Maximum allowed gas limit will be `block.gas_limit * execute_gas_limit_multiplier`
+ /// when using eth_call/eth_estimateGas.
+ #[arg(long, default_value = "10")]
+ pub execute_gas_limit_multiplier: u64,
+
+ /// Size in bytes of the LRU cache for block data.
+ #[arg(long, default_value = "50")]
+ pub eth_log_block_cache: usize,
+
+ /// Size in bytes of the LRU cache for transactions statuses data.
+ #[arg(long, default_value = "50")]
+ pub eth_statuses_cache: usize,
+}
+
+type BlockNumberOf<B> = <<B as BlockT>::Header as HeaderT>::Number;
+
+pub struct BlockImport<B: BlockT, I: BlockImportT<B>, C> {
+ inner: I,
+ client: Arc<C>,
+ backend: Arc<FrontierBackend<B, C>>,
+ evm_since: BlockNumberOf<B>,
+ _marker: PhantomData<B>,
+}
+
+impl<B: BlockT, I: Clone + BlockImportT<B>, C> Clone for BlockImport<B, I, C> {
+ fn clone(&self) -> Self {
+ BlockImport {
+ inner: self.inner.clone(),
+ client: self.client.clone(),
+ backend: self.backend.clone(),
+ evm_since: self.evm_since,
+ _marker: PhantomData,
+ }
+ }
+}
+
+impl<B, I, C> BlockImport<B, I, C>
+where
+ B: BlockT,
+ I: BlockImportT<B> + Send + Sync,
+ I::Error: Into<ConsensusError>,
+ C: ProvideRuntimeApi<B> + Send + Sync + HeaderBackend<B> + AuxStore + BlockOf,
+ C::Api: EthereumRuntimeRPCApi<B>,
+ C::Api: BlockBuilderApi<B>,
+{
+ pub fn new(
+ inner: I,
+ client: Arc<C>,
+ backend: Arc<FrontierBackend<B, C>>,
+ evm_since: BlockNumberOf<B>,
+ ) -> Self {
+ Self { inner, client, backend, evm_since, _marker: PhantomData }
+ }
+}
+
+#[async_trait::async_trait]
+impl<B, I, C> BlockImportT<B> for BlockImport<B, I, C>
+where
+ B: BlockT,
+ <B::Header as HeaderT>::Number: PartialOrd,
+ I: BlockImportT<B> + Send + Sync,
+ I::Error: Into<ConsensusError>,
+ C: ProvideRuntimeApi<B> + Send + Sync + HeaderBackend<B> + AuxStore + BlockOf,
+ C::Api: EthereumRuntimeRPCApi<B>,
+ C::Api: BlockBuilderApi<B>,
+{
+ type Error = ConsensusError;
+
+ async fn check_block(
+ &mut self,
+ block: BlockCheckParams<B>,
+ ) -> Result<ImportResult, Self::Error> {
+ self.inner.check_block(block).await.map_err(Into::into)
+ }
+
+ async fn import_block(
+ &mut self,
+ block: BlockImportParams<B>,
+ ) -> Result<ImportResult, Self::Error> {
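+ // Enforce the Frontier consensus log only for blocks at or after `evm_since`;
+ // blocks below that height predate EVM support and carry no Ethereum digest.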
+ if *block.header.number() >= self.evm_since {
+ ensure_log(block.header.digest()).map_err(Error::from)?;
+ }
+ self.inner.import_block(block).await.map_err(Into::into)
+ }
+}
+
+impl<B: BlockT, I: BlockImportT<B>, C> ParachainBlockImportMarker for BlockImport<B, I, C> {}
+
+pub fn spawn_frontier_tasks(
+ task_manager: &TaskManager,
+ client: Arc<FullClient>,
+ backend: Arc<FullBackend>,
+ frontier_backend: Arc<FrontierBackend<Block, FullClient>>,
+ filter_pool: FilterPool,
+ storage_overrides: Arc<dyn StorageOverride<Block>>,
+ fee_history_cache: FeeHistoryCache,
+ fee_history_cache_limit: FeeHistoryCacheLimit,
+ sync: Arc<SyncingService<Block>>,
+ pubsub_notification_sinks: Arc<
+ fc_mapping_sync::EthereumBlockNotificationSinks<
+ fc_mapping_sync::EthereumBlockNotification<Block>,
+ >,
+ >,
+) {
+ task_manager.spawn_essential_handle().spawn(
+ "frontier-mapping-sync-worker",
+ None,
+ MappingSyncWorker::new(
+ client.import_notification_stream(),
+ Duration::new(6, 0),
+ client.clone(),
+ backend,
+ storage_overrides.clone(),
+ frontier_backend,
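+ // The two numeric arguments are `retry_times` and `sync_from` in
+ // `MappingSyncWorker::new`: retry missing blocks 3 times, index from genesis.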
+ 3,
+ 0,
+ SyncStrategy::Parachain,
+ sync,
+ pubsub_notification_sinks,
+ )
+ .for_each(|()| future::ready(())),
+ );
+
+ // Spawn Frontier EthFilterApi maintenance task.
+ // Each filter is allowed to stay in the pool for 100 blocks.
+ const FILTER_RETAIN_THRESHOLD: u64 = 100;
+ task_manager.spawn_essential_handle().spawn(
+ "frontier-filter-pool",
+ None,
+ EthTask::filter_pool_task(client.clone(), filter_pool, FILTER_RETAIN_THRESHOLD),
+ );
+
+ // Spawn Frontier FeeHistory cache maintenance task.
+ task_manager.spawn_essential_handle().spawn(
+ "frontier-fee-history",
+ None,
+ EthTask::fee_history_task(
+ client,
+ storage_overrides,
+ fee_history_cache,
+ fee_history_cache_limit,
+ ),
+ );
+}
diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs
index 064905019..dbf35cc57 100644
--- a/node/service/src/lib.rs
+++ b/node/service/src/lib.rs
@@ -23,7 +23,9 @@ pub mod chain_spec;
pub mod collator_kusama;
#[cfg(feature = "with-bifrost-polkadot-runtime")]
pub mod collator_polkadot;
+pub mod eth;
pub use bifrost_rpc as rpc;
+pub mod dev;
/// Can be called for a `Configuration` to check if it is a configuration for the `Bifrost` network.
pub trait IdentifyVariant {
diff --git a/pallets/buy-back/src/mock.rs b/pallets/buy-back/src/mock.rs
index 61a75824e..77b9ee1dd 100644
--- a/pallets/buy-back/src/mock.rs
+++ b/pallets/buy-back/src/mock.rs
@@ -433,6 +433,10 @@ impl xcm_executor::Config for XcmConfig {
type AssetExchanger = ();
type Aliasers = Nothing;
type TransactionalProcessor = FrameTransactionalProcessor;
+ type HrmpNewChannelOpenRequestHandler = ();
+ type HrmpChannelAcceptedHandler = ();
+ type HrmpChannelClosingHandler = ();
+ type XcmRecorder = ();
}
#[cfg(feature = "runtime-benchmarks")]
diff --git a/pallets/clouds-convert/src/mock.rs b/pallets/clouds-convert/src/mock.rs
index cf44d4be6..ad5914f29 100644
--- a/pallets/clouds-convert/src/mock.rs
+++ b/pallets/clouds-convert/src/mock.rs
@@ -87,6 +87,11 @@ impl frame_system::Config for Runtime {
type Version = ();
type MaxConsumers = frame_support::traits::ConstU32<16>;
type RuntimeTask = ();
+ type SingleBlockMigrations = ();
+ type MultiBlockMigrator = ();
+ type PreInherents = ();
+ type PostInherents = ();
+ type PostTransactions = ();
}
parameter_types! {
diff --git a/pallets/cross-in-out/src/lib.rs b/pallets/cross-in-out/src/lib.rs
index 1a14206b0..5f1841e6c 100644
--- a/pallets/cross-in-out/src/lib.rs
+++ b/pallets/cross-in-out/src/lib.rs
@@ -29,6 +29,7 @@ use frame_system::pallet_prelude::*;
use orml_traits::MultiCurrency;
use sp_std::boxed::Box;
pub use weights::WeightInfo;
+#[allow(deprecated)]
use xcm::{
opaque::v2::{Junction::AccountId32, Junctions::X1, NetworkId::Any},
v2::MultiLocation,
@@ -48,6 +49,7 @@ type BalanceOf<T> = <<T as Config>::MultiCurrency as MultiCurrency<
>>::Balance;
type AccountIdOf<T> = <T as frame_system::Config>::AccountId;
+#[allow(deprecated)]
#[frame_support::pallet]
pub mod pallet {
use super::*;
diff --git a/pallets/cross-in-out/src/tests.rs b/pallets/cross-in-out/src/tests.rs
index 2a7e8b836..77ee7d706 100644
--- a/pallets/cross-in-out/src/tests.rs
+++ b/pallets/cross-in-out/src/tests.rs
@@ -22,8 +22,10 @@ use crate::{mock::*, *};
use bifrost_primitives::currency::KSM;
use frame_support::{assert_noop, assert_ok, WeakBoundedVec};
use sp_runtime::DispatchError::BadOrigin;
+#[allow(deprecated)]
use xcm::opaque::v2::{Junction, Junctions::X1};
+#[allow(deprecated)]
#[test]
fn cross_in_and_cross_out_should_work() {
ExtBuilder::default().one_hundred_for_alice_n_bob().build().execute_with(|| {
@@ -160,6 +162,7 @@ fn add_to_and_remove_from_register_whitelist_should_work() {
});
}
+#[allow(deprecated)]
#[test]
fn register_linked_account_should_work() {
ExtBuilder::default().one_hundred_for_alice_n_bob().build().execute_with(|| {
@@ -235,6 +238,7 @@ fn register_and_deregister_currency_for_cross_in_out_should_work() {
});
}
+#[allow(deprecated)]
#[test]
fn change_outer_linked_account_should_work() {
ExtBuilder::default().one_hundred_for_alice_n_bob().build().execute_with(|| {
diff --git a/pallets/currencies/src/lib.rs b/pallets/currencies/src/lib.rs
index 4d0304868..5c6b448bc 100644
--- a/pallets/currencies/src/lib.rs
+++ b/pallets/currencies/src/lib.rs
@@ -619,13 +619,18 @@ impl<T: Config> fungibles::Inspect<T::AccountId> for Pallet<T> {
}
impl<T: Config> fungibles::Unbalanced<T::AccountId> for Pallet<T> {
- fn handle_dust(_dust: fungibles::Dust<T::AccountId, Self>) {
- // https://github.com/paritytech/substrate/blob/569aae5341ea0c1d10426fa1ec13a36c0b64393b/frame/support/src/traits/tokens/fungibles/regular.rs#L124
- // Note: currently the field of Dust type is private and there is no constructor for it, so
- // we can't construct a Dust value and pass it. Do nothing here.
- // `Pallet` overwrites these functions which can be called as user-level operation of
- // fungibles traits when calling these functions, it will not actually reach
- // `Unbalanced::handle_dust`.
+ fn handle_dust(dust: fungibles::Dust<T::AccountId, Self>) {
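+ // Dust in the native currency goes to the `fungible` implementation; any
+ // other asset is handed to the multi-currency `fungibles` implementation.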
+ let asset = dust.0;
+ if asset == T::GetNativeCurrencyId::get() {
+ <T::NativeCurrency as fungible::Unbalanced<T::AccountId>>::handle_dust(fungible::Dust(
+ dust.1.into(),
+ ))
+ } else {
+ <T::MultiCurrency as fungibles::Unbalanced<T::AccountId>>::handle_dust(fungibles::Dust(
+ dust.0.into(),
+ dust.1.into(),
+ ))
+ }
}
fn write_balance(
@@ -668,16 +673,26 @@ impl<T: Config> fungibles::Mutate<T::AccountId> for Pallet<T> {
asset_id: Self::AssetId,
who: &T::AccountId,
amount: Self::Balance,
+ preservation: Preservation,
precision: Precision,
fortitude: Fortitude,
) -> Result<Self::Balance, DispatchError> {
match asset_id {
id if id == T::GetNativeCurrencyId::get() =>
<T::NativeCurrency as fungible::Mutate<T::AccountId>>::burn_from(
- who, amount, precision, fortitude,
+ who,
+ amount,
+ preservation,
+ precision,
+ fortitude,
),
_ => <T::MultiCurrency as fungibles::Mutate<T::AccountId>>::burn_from(
- asset_id, who, amount, precision, fortitude,
+ asset_id,
+ who,
+ amount,
+ preservation,
+ precision,
+ fortitude,
),
}
}
@@ -1246,10 +1261,17 @@ where
fn burn_from(
who: &T::AccountId,
amount: Self::Balance,
+ preservation: Preservation,
precision: Precision,
fortitude: Fortitude,
) -> Result<Self::Balance, DispatchError> {
- <Currency as fungible::Mutate<T::AccountId>>::burn_from(who, amount, precision, fortitude)
+ <Currency as fungible::Mutate<T::AccountId>>::burn_from(
+ who,
+ amount,
+ preservation,
+ precision,
+ fortitude,
+ )
}
fn transfer(
diff --git a/pallets/evm-accounts/Cargo.toml b/pallets/evm-accounts/Cargo.toml
new file mode 100644
index 000000000..4542bc2d6
--- /dev/null
+++ b/pallets/evm-accounts/Cargo.toml
@@ -0,0 +1,56 @@
+[package]
+name = "pallet-evm-accounts"
+version = "1.1.1"
+authors = ['GalacticCouncil']
+edition = "2021"
+license = "Apache-2.0"
+homepage = 'https://github.com/galacticcouncil/hydradx-node'
+repository = 'https://github.com/galacticcouncil/hydradx-node'
+description = "HydraDX EVM accounts pallet"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+# parity
+scale-info = { version = "2.3.1", default-features = false, features = ["derive"] }
+codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.4.0" }
+
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+sp-std = { workspace = true }
+sp-core = { workspace = true }
+
+# Optional imports for benchmarking
+frame-benchmarking = { workspace = true, optional = true }
+sp-io = { workspace = true, optional = true }
+pallet-traits = { workspace = true }
+
+[dev-dependencies]
+sp-core = { workspace = true }
+sp-io = { workspace = true }
+frame-benchmarking = { workspace = true }
+orml-tokens = { workspace = true }
+orml-traits = { workspace = true, default-features = false }
+hex-literal = { workspace = true }
+
+[features]
+default = ["std"]
+std = [
+ "codec/std",
+ "scale-info/std",
+ "sp-std/std",
+ "sp-core/std",
+ "sp-io/std",
+ "frame-support/std",
+ "frame-system/std",
+ "orml-tokens/std",
+ "pallet-traits/std",
+ "frame-benchmarking/std",
+]
+runtime-benchmarks = [
+ "frame-benchmarking/runtime-benchmarks",
+ "sp-io",
+]
+try-runtime = ["frame-support/try-runtime"]
diff --git a/pallets/evm-accounts/README.md b/pallets/evm-accounts/README.md
new file mode 100644
index 000000000..8e2a80b72
--- /dev/null
+++ b/pallets/evm-accounts/README.md
@@ -0,0 +1,34 @@
+# EVM accounts pallet
+
+## Terminology
+
+* **Truncated address:** A Substrate address created from an EVM address by prefixing it with "ETH\0" and appending eight 0 bytes.
+* **Full Substrate address:** The original 32-byte native address (not a truncated address).
+* **EVM address:** The first 20 bytes of a Substrate address.
+
+## Overview
+
+The pallet allows users to bind their Substrate account to an EVM address, and allows granting permission to deploy smart contracts.
+The purpose of this pallet is to make interaction with the EVM easier.
+Binding an address is not necessary for interacting with the EVM.
+
+### Binding
+Without binding, we are unable to get the original Substrate address from the EVM address inside
+the EVM, where we have access only to the EVM address (the first 20 bytes of a Substrate account).
+In that case we create and use a truncated version of the original Substrate address that called the EVM.
+The original and the truncated address are two different Substrate addresses.
+
+With binding, we store the last 12 bytes of the Substrate address. Then we can get the original
+Substrate address by appending these 12 bytes from storage to the EVM address.
+
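+A minimal sketch of the two derivations described above (illustrative helper functions, not part of the pallet's API):
+
+```rust
+/// Truncated address: "ETH\0" ++ 20-byte EVM address ++ eight zero bytes.
+fn truncated_address(evm: [u8; 20]) -> [u8; 32] {
+    let mut out = [0u8; 32];
+    out[0..4].copy_from_slice(b"ETH\0");
+    out[4..24].copy_from_slice(&evm);
+    // The remaining eight bytes stay zero.
+    out
+}
+
+/// Bound address: 20-byte EVM address ++ the 12 bytes kept in pallet storage.
+fn bound_address(evm: [u8; 20], last_12_bytes: [u8; 12]) -> [u8; 32] {
+    let mut out = [0u8; 32];
+    out[0..20].copy_from_slice(&evm);
+    out[20..32].copy_from_slice(&last_12_bytes);
+    out
+}
+```
+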
+### Smart contract deployment
+This pallet also allows granting a permission to deploy smart contracts.
+`ControllerOrigin` can add this permission to EVM addresses.
+The list of whitelisted accounts is stored in the storage of this pallet.
+
+### Dispatchable Functions
+
+* `bind_evm_address` - Binds a Substrate address to an EVM address.
+* `add_contract_deployer` - Adds permission to deploy smart contracts.
+* `remove_contract_deployer` - Removes a whitelisted address's permission to deploy smart contracts.
+* `renounce_contract_deployer` - Renounces the caller's permission to deploy smart contracts.
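+
+A rough usage sketch (assuming a mock runtime where this pallet is instantiated as `EVMAccounts`, with a hypothetical `new_test_ext` helper):
+
+```rust
+use frame_support::assert_ok;
+use frame_system::RawOrigin;
+
+#[test]
+fn bind_then_whitelist() {
+    new_test_ext().execute_with(|| {
+        let alice = AccountId::from([1u8; 32]);
+        // Bind Alice's Substrate account to its EVM address.
+        assert_ok!(EVMAccounts::bind_evm_address(RawOrigin::Signed(alice.clone()).into()));
+        // Whitelist the derived EVM address for contract deployment (controller origin only).
+        let evm = EVMAccounts::evm_address(&alice);
+        assert_ok!(EVMAccounts::add_contract_deployer(RawOrigin::Root.into(), evm));
+    });
+}
+```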
diff --git a/pallets/evm-accounts/rpc/runtime-api/Cargo.toml b/pallets/evm-accounts/rpc/runtime-api/Cargo.toml
new file mode 100644
index 000000000..731980d10
--- /dev/null
+++ b/pallets/evm-accounts/rpc/runtime-api/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "pallet-evm-accounts-rpc-runtime-api"
+version = "1.0.0"
+authors = ['GalacticCouncil']
+edition = "2021"
+license = "Apache-2.0"
+homepage = 'https://github.com/galacticcouncil/hydradx-node'
+repository = 'https://github.com/galacticcouncil/hydradx-node'
+description = "RPC runtime API for EVM accounts pallet"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+sp-api = { workspace = true }
+
+[features]
+default = ["std"]
+std = [
+ "codec/std",
+ "sp-api/std",
+]
diff --git a/pallets/evm-accounts/rpc/runtime-api/README.md b/pallets/evm-accounts/rpc/runtime-api/README.md
new file mode 100644
index 000000000..5969f05c4
--- /dev/null
+++ b/pallets/evm-accounts/rpc/runtime-api/README.md
@@ -0,0 +1 @@
+Runtime API definition for EVM accounts pallet.
diff --git a/pallets/evm-accounts/rpc/runtime-api/src/lib.rs b/pallets/evm-accounts/rpc/runtime-api/src/lib.rs
new file mode 100644
index 000000000..c26dcacf3
--- /dev/null
+++ b/pallets/evm-accounts/rpc/runtime-api/src/lib.rs
@@ -0,0 +1,38 @@
+// Copyright (C) 2020-2022 Intergalactic, Limited (GIB).
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Runtime API definition for the EVM accounts pallet.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use codec::Codec;
+
+sp_api::decl_runtime_apis! {
+ /// The API to query EVM account conversions.
+ pub trait EvmAccountsApi<AccountId, EvmAddress> where
+ AccountId: Codec,
+ EvmAddress: Codec,
+ {
+ /// Get the EVM address from the Substrate address.
+ fn evm_address(account_id: AccountId) -> EvmAddress;
+
+ /// Return the Substrate address bound to the EVM account. If not bound, returns `None`.
+ fn bound_account_id(evm_address: EvmAddress) -> Option<AccountId>;
+
+ /// Get the Substrate address from the EVM address.
+ /// Returns the truncated version of the address if the address wasn't bound.
+ fn account_id(evm_address: EvmAddress) -> AccountId;
+ }
+}
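+
+// A possible wiring inside a runtime's `impl_runtime_apis!` block (sketch only;
+// `EVMAccounts`, `AccountId` and `EvmAddress` here are assumptions about the
+// consuming runtime, not items of this crate):
+//
+// impl pallet_evm_accounts_rpc_runtime_api::EvmAccountsApi<Block, AccountId, EvmAddress>
+//     for Runtime
+// {
+//     fn evm_address(account_id: AccountId) -> EvmAddress {
+//         EVMAccounts::evm_address(&account_id)
+//     }
+//     fn bound_account_id(evm_address: EvmAddress) -> Option<AccountId> {
+//         EVMAccounts::bound_account_id(evm_address)
+//     }
+//     fn account_id(evm_address: EvmAddress) -> AccountId {
+//         EVMAccounts::account_id(evm_address)
+//     }
+// }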
diff --git a/pallets/evm-accounts/src/benchmarking.rs b/pallets/evm-accounts/src/benchmarking.rs
new file mode 100644
index 000000000..9214eab69
--- /dev/null
+++ b/pallets/evm-accounts/src/benchmarking.rs
@@ -0,0 +1,78 @@
+// Copyright (C) 2020-2024 Intergalactic, Limited (GIB).
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![cfg(feature = "runtime-benchmarks")]
+
+use super::*;
+use crate::Pallet as EVMAccounts;
+
+use frame_benchmarking::{account, benchmarks};
+use frame_system::RawOrigin;
+use sp_std::prelude::*;
+
+benchmarks! {
+ where_clause {
+ where T::AccountId: AsRef<[u8; 32]> + frame_support::pallet_prelude::IsType<AccountId32>,
+ }
+
+ bind_evm_address {
+ let user: T::AccountId = account("user", 0, 1);
+ let evm_address = Pallet::<T>::evm_address(&user);
+ assert!(!AccountExtension::<T>::contains_key(evm_address));
+
+ }: _(RawOrigin::Signed(user.clone()))
+ verify {
+ assert!(AccountExtension::<T>::contains_key(evm_address));
+ }
+
+ add_contract_deployer {
+ let user: T::AccountId = account("user", 0, 1);
+ let evm_address = Pallet::<T>::evm_address(&user);
+ assert!(!ContractDeployer::<T>::contains_key(evm_address));
+
+ }: _(RawOrigin::Root, evm_address)
+ verify {
+ assert!(ContractDeployer::<T>::contains_key(evm_address));
+ }
+
+ remove_contract_deployer {
+ let user: T::AccountId = account("user", 0, 1);
+ let evm_address = Pallet::<T>::evm_address(&user);
+
+ EVMAccounts::<T>::add_contract_deployer(RawOrigin::Root.into(), evm_address)?;
+
+ assert!(ContractDeployer::<T>::contains_key(evm_address));
+
+ }: _(RawOrigin::Root, evm_address)
+ verify {
+ assert!(!ContractDeployer::<T>::contains_key(evm_address));
+ }
+
+ renounce_contract_deployer {
+ let user: T::AccountId = account("user", 0, 1);
+ let evm_address = Pallet::<T>::evm_address(&user);
+
+ EVMAccounts::<T>::add_contract_deployer(RawOrigin::Root.into(), evm_address)?;
+ EVMAccounts::<T>::bind_evm_address(RawOrigin::Signed(user.clone()).into())?;
+
+ assert!(ContractDeployer::<T>::contains_key(evm_address));
+
+ }: _(RawOrigin::Signed(user))
+ verify {
+ assert!(!ContractDeployer::<T>::contains_key(evm_address));
+ }
+
+ impl_benchmark_test_suite!(Pallet, crate::mock::ExtBuilder::default().build(), crate::mock::Test);
+}
diff --git a/pallets/evm-accounts/src/lib.rs b/pallets/evm-accounts/src/lib.rs
new file mode 100644
index 000000000..e86a6d5e2
--- /dev/null
+++ b/pallets/evm-accounts/src/lib.rs
@@ -0,0 +1,317 @@
+// Copyright (C) 2020-2024 Intergalactic, Limited (GIB).
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # EVM accounts pallet
+//!
+//! ## Terminology
+//!
+//! * **Truncated address:** A Substrate address created from an EVM address by prefixing it with
+//! "ETH\0" and appending eight 0 bytes.
+//! * **Full Substrate address:** The original 32-byte native address (not a truncated address).
+//! * **EVM address:** The first 20 bytes of a Substrate address.
+//!
+//! ## Overview
+//!
+//! The pallet allows users to bind their Substrate account to an EVM address, and allows granting
+//! permission to deploy smart contracts. The purpose of this pallet is to make interaction with
+//! the EVM easier. Binding an address is not necessary for interacting with the EVM.
+//!
+//! ### Binding
+//! Without binding, we are unable to get the original Substrate address from the EVM address
+//! inside the EVM, where we have access only to the EVM address (the first 20 bytes of a
+//! Substrate account). In that case we create and use a truncated version of the original
+//! Substrate address that called the EVM. The original and the truncated address are two
+//! different Substrate addresses.
+//!
+//! With binding, we store the last 12 bytes of the Substrate address. Then we can get the original
+//! Substrate address by appending these 12 bytes from storage to the EVM address.
+//!
+//! ### Smart contract deployment
+//! This pallet also allows granting a permission to deploy smart contracts.
+//! `ControllerOrigin` can add this permission to EVM addresses.
+//! The list of whitelisted accounts is stored in the storage of this pallet.
+//!
+//! ### Dispatchable Functions
+//!
+//! * `bind_evm_address` - Binds a Substrate address to an EVM address.
+//! * `add_contract_deployer` - Adds permission to deploy smart contracts.
+//! * `remove_contract_deployer` - Removes a whitelisted address's permission to deploy smart
+//! contracts.
+//! * `renounce_contract_deployer` - Renounces the caller's permission to deploy smart contracts.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use frame_support::{
+ ensure,
+ pallet_prelude::{DispatchResult, Get},
+};
+use pallet_traits::evm::InspectEvmAccounts;
+use sp_core::{
+ crypto::{AccountId32, ByteArray},
+ H160, U256,
+};
+
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+
+mod benchmarking;
+pub mod weights;
+
+pub use pallet::*;
+pub use weights::WeightInfo;
+
+pub type Balance = u128;
+pub type EvmAddress = H160;
+pub type AccountIdLast12Bytes = [u8; 12];
+
+pub trait EvmNonceProvider {
+ fn get_nonce(evm_address: H160) -> U256;
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+ use super::*;
+ use frame_support::pallet_prelude::*;
+ use frame_system::pallet_prelude::*;
+
+ #[pallet::pallet]
+ pub struct Pallet(_);
+
+ #[pallet::config]
+ pub trait Config: frame_system::Config {
+ /// The overarching event type.
+ type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+
+ /// EVM nonce provider.
+ type EvmNonceProvider: EvmNonceProvider;
+
+ /// Fee multiplier for the binding of addresses.
+ #[pallet::constant]
+ type FeeMultiplier: Get<u32>;
+
+ /// Origin that can whitelist addresses for smart contract deployment.
+ type ControllerOrigin: EnsureOrigin<Self::RuntimeOrigin>;
+
+ /// Weight information for extrinsic in this pallet.
+ type WeightInfo: WeightInfo;
+ }
+
+ /// Maps an EVM address to the last 12 bytes of a substrate account.
+ #[pallet::storage]
+ #[pallet::getter(fn account)]
+ pub(super) type AccountExtension<T: Config> =
+ StorageMap<_, Blake2_128Concat, EvmAddress, AccountIdLast12Bytes>;
+
+ /// Whitelisted addresses that are allowed to deploy smart contracts.
+ #[pallet::storage]
+ pub(super) type ContractDeployer<T: Config> = StorageMap<_, Blake2_128Concat, EvmAddress, ()>;
+
+ #[pallet::event]
+ #[pallet::generate_deposit(pub(crate) fn deposit_event)]
+ pub enum Event<T: Config> {
+ /// Binding was created.
+ Bound { account: T::AccountId, address: EvmAddress },
+ /// Deployer was added.
+ DeployerAdded { who: EvmAddress },
+ /// Deployer was removed.
+ DeployerRemoved { who: EvmAddress },
+ }
+
+ #[pallet::error]
+ #[cfg_attr(test, derive(PartialEq, Eq))]
+ pub enum Error<T> {
+ /// EVM Account's nonce is not zero
+ TruncatedAccountAlreadyUsed,
+ /// Address is already bound
+ AddressAlreadyBound,
+ /// Bound address cannot be used
+ BoundAddressCannotBeUsed,
+ /// Address not whitelisted
+ AddressNotWhitelisted,
+ }
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T>
+ where
+ T::AccountId: frame_support::traits::IsType<AccountId32>,
+ {
+ fn integrity_test() {
+ // The implementation of this pallet expects `EvmAddress` to be 20 bytes and `AccountId`
+ // to be 32 bytes long. If this is not true, `copy_from_slice` might panic.
+ assert_eq!(EvmAddress::len_bytes(), 20, "EVM Address is expected to be 20 bytes long.");
+ assert_eq!(AccountId32::LEN, 32, "AccountId is expected to be 32 bytes long.");
+ }
+ }
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T>
+ where
+ T::AccountId: AsRef<[u8; 32]> + frame_support::traits::IsType<AccountId32>,
+ {
+ /// Binds a Substrate address to EVM address.
+ /// After binding, the EVM is able to convert an EVM address to the original Substrate
+ /// address. Without binding, the EVM converts an EVM address to a truncated Substrate
+ /// address, which doesn't correspond to the origin address.
+ ///
+ /// Binding an address is not necessary for interacting with the EVM.
+ ///
+ /// Parameters:
+ /// - `origin`: Substrate account binding an address
+ ///
+ /// Emits `Bound` event when successful.
+ #[pallet::call_index(0)]
+ #[pallet::weight(<T as Config>::WeightInfo::bind_evm_address().saturating_mul(<T as Config>::FeeMultiplier::get() as u64))]
+ pub fn bind_evm_address(origin: OriginFor<T>) -> DispatchResult {
+ let who = ensure_signed(origin)?;
+
+ let evm_address = Self::evm_address(&who);
+
+ // This check is not strictly necessary. It prevents binding the same address multiple
+ // times. Without it, binding the address a second time could pass or fail depending
+ // on the nonce. So it's better to prevent any confusion and throw an error when the
+ // address is already bound.
+ ensure!(
+ !AccountExtension::<T>::contains_key(evm_address),
+ Error::<T>::AddressAlreadyBound
+ );
+
+ let nonce = T::EvmNonceProvider::get_nonce(evm_address);
+ ensure!(nonce.is_zero(), Error::<T>::TruncatedAccountAlreadyUsed);
+
+ let mut last_12_bytes: [u8; 12] = [0; 12];
+ last_12_bytes.copy_from_slice(&who.as_ref()[20..32]);
+
+ <AccountExtension<T>>::insert(evm_address, last_12_bytes);
+
+ Self::deposit_event(Event::Bound { account: who, address: evm_address });
+
+ Ok(())
+ }
+
+ /// Adds an EVM address to the list of addresses that are allowed to deploy smart contracts.
+ ///
+ /// Parameters:
+ /// - `origin`: Substrate account whitelisting an address. Must be `ControllerOrigin`.
+ /// - `address`: EVM address that is whitelisted
+ ///
+ /// Emits `DeployerAdded` event when successful.
+ #[pallet::call_index(1)]
+ #[pallet::weight(<T as Config>::WeightInfo::add_contract_deployer())]
+ pub fn add_contract_deployer(origin: OriginFor<T>, address: EvmAddress) -> DispatchResult {
+ T::ControllerOrigin::ensure_origin(origin.clone())?;
+
+ <ContractDeployer<T>>::insert(address, ());
+
+ Self::deposit_event(Event::DeployerAdded { who: address });
+
+ Ok(())
+ }
+
+ /// Removes an EVM address from the list of addresses that are allowed to deploy smart
+ /// contracts.
+ ///
+ /// Parameters:
+ /// - `origin`: Substrate account removing the EVM address from the whitelist. Must be
+ /// `ControllerOrigin`.
+ /// - `address`: EVM address that is removed from the whitelist
+ ///
+ /// Emits `DeployerRemoved` event when successful.
+ #[pallet::call_index(2)]
+ #[pallet::weight(