diff --git a/crates/common/src/asterisc/io.rs b/crates/common/src/asterisc/io.rs index b6cda49d8..8c0c96d29 100644 --- a/crates/common/src/asterisc/io.rs +++ b/crates/common/src/asterisc/io.rs @@ -10,9 +10,9 @@ pub struct AsteriscIO; /// See https://jborza.com/post/2021-05-11-riscv-linux-syscalls/ /// /// **Note**: This is not an exhaustive list of system calls available to the `client` program, -/// only the ones necessary for the [BasicKernelInterface] trait implementation. If an extension trait for -/// the [BasicKernelInterface] trait is created for the `asterisc` kernel, this list should be extended -/// accordingly. +/// only the ones necessary for the [BasicKernelInterface] trait implementation. If an extension +/// trait for the [BasicKernelInterface] trait is created for the `asterisc` kernel, this list +/// should be extended accordingly. #[repr(u32)] pub(crate) enum SyscallNumber { /// Sets the Exited and ExitCode states to true and $a0 respectively. diff --git a/crates/common/src/asterisc/mod.rs b/crates/common/src/asterisc/mod.rs index d668c4472..841a85dd7 100644 --- a/crates/common/src/asterisc/mod.rs +++ b/crates/common/src/asterisc/mod.rs @@ -1,5 +1,5 @@ -//! This module contains raw syscall bindings for the `riscv64gc` target architecture, as well as a high-level -//! implementation of the [crate::BasicKernelInterface] trait for the `asterisc` kernel. +//! This module contains raw syscall bindings for the `riscv64gc` target architecture, as well as a +//! high-level implementation of the [crate::BasicKernelInterface] trait for the `asterisc` kernel. 
pub(crate) mod io; mod syscall; diff --git a/crates/common/src/cannon/io.rs b/crates/common/src/cannon/io.rs index ee833e774..e39f527b0 100644 --- a/crates/common/src/cannon/io.rs +++ b/crates/common/src/cannon/io.rs @@ -1,8 +1,8 @@ use crate::{cannon::syscall, BasicKernelInterface, FileDescriptor, RegisterSize}; use anyhow::{anyhow, Result}; -/// Concrete implementation of the [BasicKernelInterface] trait for the `MIPS32rel1` target architecture. Exposes a safe -/// interface for performing IO operations within the FPVM kernel. +/// Concrete implementation of the [BasicKernelInterface] trait for the `MIPS32rel1` target +/// architecture. Exposes a safe interface for performing IO operations within the FPVM kernel. #[derive(Debug)] pub struct CannonIO; @@ -11,9 +11,9 @@ pub struct CannonIO; /// See [Cannon System Call Specification](https://specs.optimism.io/experimental/fault-proof/cannon-fault-proof-vm.html#syscalls) /// /// **Note**: This is not an exhaustive list of system calls available to the `client` program, -/// only the ones necessary for the [BasicKernelInterface] trait implementation. If an extension trait for -/// the [BasicKernelInterface] trait is created for the `Cannon` kernel, this list should be extended -/// accordingly. +/// only the ones necessary for the [BasicKernelInterface] trait implementation. If an extension +/// trait for the [BasicKernelInterface] trait is created for the `Cannon` kernel, this list should +/// be extended accordingly. #[repr(u32)] pub(crate) enum SyscallNumber { /// Sets the Exited and ExitCode states to true and $a0 respectively. diff --git a/crates/common/src/cannon/mod.rs b/crates/common/src/cannon/mod.rs index 995de9b98..012070c16 100644 --- a/crates/common/src/cannon/mod.rs +++ b/crates/common/src/cannon/mod.rs @@ -1,5 +1,5 @@ -//! This module contains raw syscall bindings for the `MIPS32r2` target architecture, as well as a high-level -//! 
implementation of the [crate::BasicKernelInterface] trait for the `Cannon` kernel. +//! This module contains raw syscall bindings for the `MIPS32r2` target architecture, as well as a +//! high-level implementation of the [crate::BasicKernelInterface] trait for the `Cannon` kernel. pub(crate) mod io; mod syscall; diff --git a/crates/common/src/cannon/syscall.rs b/crates/common/src/cannon/syscall.rs index c17294b68..92509c2cf 100644 --- a/crates/common/src/cannon/syscall.rs +++ b/crates/common/src/cannon/syscall.rs @@ -61,9 +61,7 @@ pub(crate) unsafe fn syscall1(n: RegisterSize, arg1: RegisterSize) -> RegisterSi options(nostack, preserves_flags) ); - (err == 0) - .then_some(ret) - .unwrap_or_else(|| ret.wrapping_neg()) + (err == 0).then_some(ret).unwrap_or_else(|| ret.wrapping_neg()) } /// Issues a raw system call with 3 arguments. (e.g. read, write) @@ -97,16 +95,12 @@ pub(crate) unsafe fn syscall3( options(nostack, preserves_flags) ); - let value = (err == 0) - .then_some(ret) - .unwrap_or_else(|| ret.wrapping_neg()); + let value = (err == 0).then_some(ret).unwrap_or_else(|| ret.wrapping_neg()); - (value <= -4096isize as RegisterSize) - .then_some(value) - .ok_or_else(|| { - // Truncation of the error value is guaranteed to never occur due to - // the above check. This is the same check that musl uses: - // https://git.musl-libc.org/cgit/musl/tree/src/internal/syscall_ret.c?h=v1.1.15 - -(value as i32) - }) + (value <= -4096isize as RegisterSize).then_some(value).ok_or_else(|| { + // Truncation of the error value is guaranteed to never occur due to + // the above check. This is the same check that musl uses: + // https://git.musl-libc.org/cgit/musl/tree/src/internal/syscall_ret.c?h=v1.1.15 + -(value as i32) + }) } diff --git a/crates/common/src/executor.rs b/crates/common/src/executor.rs index 2d7babb52..46c4f6c4e 100644 --- a/crates/common/src/executor.rs +++ b/crates/common/src/executor.rs @@ -1,5 +1,5 @@ -//! 
This module contains utilities for handling async functions in the no_std environment. This allows for usage of -//! async/await syntax for futures in a single thread. +//! This module contains utilities for handling async functions in the no_std environment. This +//! allows for usage of async/await syntax for futures in a single thread. use alloc::boxed::Box; use core::{ @@ -7,8 +7,8 @@ use core::{ task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, }; -/// This function busy waits on a future until it is ready. It uses a no-op waker to poll the future in a -/// thread-blocking loop. +/// This function busy waits on a future until it is ready. It uses a no-op waker to poll the future +/// in a thread-blocking loop. pub fn block_on(f: impl Future) -> T { let mut f = Box::pin(f); diff --git a/crates/common/src/io.rs b/crates/common/src/io.rs index 351950dd9..33922537b 100644 --- a/crates/common/src/io.rs +++ b/crates/common/src/io.rs @@ -85,22 +85,19 @@ mod native_io { // forget the file descriptor so that the `Drop` impl doesn't close it. std::mem::forget(file); - n.try_into() - .map_err(|_| anyhow!("Failed to convert usize to RegisterSize")) + n.try_into().map_err(|_| anyhow!("Failed to convert usize to RegisterSize")) } fn read(fd: FileDescriptor, buf: &mut [u8]) -> Result { let raw_fd: RegisterSize = fd.into(); let mut file = unsafe { File::from_raw_fd(raw_fd as i32) }; - let n = file - .read(buf) - .map_err(|e| anyhow!("Error reading from file descriptor: {e}"))?; + let n = + file.read(buf).map_err(|e| anyhow!("Error reading from file descriptor: {e}"))?; // forget the file descriptor so that the `Drop` impl doesn't close it. std::mem::forget(file); - n.try_into() - .map_err(|_| anyhow!("Failed to convert usize to RegisterSize")) + n.try_into().map_err(|_| anyhow!("Failed to convert usize to RegisterSize")) } fn exit(code: RegisterSize) -> ! 
{ diff --git a/crates/common/src/lib.rs b/crates/common/src/lib.rs index c7d1e39de..f290b77f1 100644 --- a/crates/common/src/lib.rs +++ b/crates/common/src/lib.rs @@ -1,10 +1,5 @@ #![doc = include_str!("../README.md")] -#![warn( - missing_debug_implementations, - missing_docs, - unreachable_pub, - rustdoc::all -)] +#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)] #![deny(unused_must_use, rust_2018_idioms)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(target_arch = "mips", feature(asm_experimental_arch))] diff --git a/crates/common/src/malloc.rs b/crates/common/src/malloc.rs index 6f48d582a..349ebfb37 100644 --- a/crates/common/src/malloc.rs +++ b/crates/common/src/malloc.rs @@ -20,8 +20,8 @@ pub mod global_allocator { /// This function is unsafe because the caller must ensure: /// * The allocator has not already been initialized. /// * The provided memory region must be valid, non-null, and not used by anything else. - /// * After aligning the start and end addresses, the size of the heap must be > 0, or the function - /// will panic. + /// * After aligning the start and end addresses, the size of the heap must be > 0, or the + /// function will panic. pub unsafe fn init_allocator(heap_start_addr: *mut u8, heap_size: usize) { ALLOCATOR.lock().init(heap_start_addr, heap_size) } diff --git a/crates/common/src/traits/basic.rs b/crates/common/src/traits/basic.rs index ed6825edc..884b9e80c 100644 --- a/crates/common/src/traits/basic.rs +++ b/crates/common/src/traits/basic.rs @@ -1,15 +1,16 @@ -//! Defines the [BasicKernelInterface] trait, which describes the functionality of several system calls inside of -//! the FPVM kernel. +//! Defines the [BasicKernelInterface] trait, which describes the functionality of several system +//! calls inside of the FPVM kernel. 
use crate::{FileDescriptor, RegisterSize}; use anyhow::Result; -/// The [BasicKernelInterface] trait describes the functionality of several core system calls inside of -/// the FPVM kernel. Commonly, FPVMs delegate IO operations to custom file descriptors in the `client` program. It is -/// a safe wrapper around the raw system calls available to the `client` program. +/// The [BasicKernelInterface] trait describes the functionality of several core system calls inside +/// of the FPVM kernel. Commonly, FPVMs delegate IO operations to custom file descriptors in the +/// `client` program. It is a safe wrapper around the raw system calls available to the `client` +/// program. /// -/// In cases where the set of system calls defined in this trait need to be extended, an additional trait should be -/// created that extends this trait. +/// In cases where the set of system calls defined in this trait need to be extended, an additional +/// trait should be created that extends this trait. pub trait BasicKernelInterface { /// Write the given buffer to the given file descriptor. 
fn write(fd: FileDescriptor, buf: &[u8]) -> Result; diff --git a/crates/derive/src/lib.rs b/crates/derive/src/lib.rs index 0349da6ab..a2cf953c4 100644 --- a/crates/derive/src/lib.rs +++ b/crates/derive/src/lib.rs @@ -1,10 +1,5 @@ #![doc = include_str!("../README.md")] -#![warn( - missing_debug_implementations, - missing_docs, - unreachable_pub, - rustdoc::all -)] +#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)] #![deny(unused_must_use, rust_2018_idioms)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![no_std] diff --git a/crates/derive/src/stages/channel_bank.rs b/crates/derive/src/stages/channel_bank.rs index 4da1ee609..73feddfb7 100644 --- a/crates/derive/src/stages/channel_bank.rs +++ b/crates/derive/src/stages/channel_bank.rs @@ -6,8 +6,7 @@ use crate::{ traits::{ChainProvider, DataAvailabilityProvider, ResettableStage}, types::{BlockInfo, Channel, Frame, RollupConfig, StageError, StageResult, SystemConfig}, }; -use alloc::sync::Arc; -use alloc::{boxed::Box, collections::VecDeque}; +use alloc::{boxed::Box, collections::VecDeque, sync::Arc}; use alloy_primitives::Bytes; use anyhow::anyhow; use async_trait::async_trait; @@ -23,8 +22,8 @@ use hashbrown::HashMap; /// Note: we prune before we ingest data. /// As we switch between ingesting data & reading, the prune step occurs at an odd point /// Specifically, the channel bank is not allowed to become too large between successive calls -/// to `IngestData`. This means that we can do an ingest and then do a read while becoming too large. -/// [ChannelBank] buffers channel frames, and emits full channel data +/// to `IngestData`. This means that we can do an ingest and then do a read while becoming too +/// large. [ChannelBank] buffers channel frames, and emits full channel data #[derive(Debug)] pub struct ChannelBank where @@ -48,12 +47,7 @@ where { /// Create a new [ChannelBank] stage. 
pub fn new(cfg: RollupConfig, prev: FrameQueue) -> Self { - Self { - cfg: Arc::new(cfg), - channels: HashMap::new(), - channel_queue: VecDeque::new(), - prev, - } + Self { cfg: Arc::new(cfg), channels: HashMap::new(), channel_queue: VecDeque::new(), prev } } /// Returns the L1 origin [BlockInfo]. @@ -71,14 +65,8 @@ where pub fn prune(&mut self) -> StageResult<()> { let mut total_size = self.size(); while total_size > MAX_CHANNEL_BANK_SIZE { - let id = self - .channel_queue - .pop_front() - .ok_or(anyhow!("No channel to prune"))?; - let channel = self - .channels - .remove(&id) - .ok_or(anyhow!("Could not find channel"))?; + let id = self.channel_queue.pop_front().ok_or(anyhow!("No channel to prune"))?; + let channel = self.channels.remove(&id).ok_or(anyhow!("Could not find channel"))?; total_size -= channel.size(); } Ok(()) @@ -120,10 +108,7 @@ where // Return an `Ok(None)` if the first channel is timed out. There may be more timed // out channels at the head of the queue and we want to remove them all. let first = self.channel_queue[0]; - let channel = self - .channels - .get(&first) - .ok_or(anyhow!("Channel not found"))?; + let channel = self.channels.get(&first).ok_or(anyhow!("Channel not found"))?; let origin = self.origin().ok_or(anyhow!("No origin present"))?; // Remove all timed out channels from the front of the `channel_queue`. @@ -133,11 +118,12 @@ where return Ok(None); } - // At this point we have removed all timed out channels from the front of the `channel_queue`. - // Pre-Canyon we simply check the first index. + // At this point we have removed all timed out channels from the front of the + // `channel_queue`. Pre-Canyon we simply check the first index. // Post-Canyon we read the entire channelQueue for the first ready channel. // If no channel is available, we return StageError::Eof. - // Canyon is activated when the first L1 block whose time >= CanyonTime, not on the L2 timestamp. 
+ // Canyon is activated when the first L1 block whose time >= CanyonTime, not on the L2 + // timestamp. if !self.cfg.is_canyon_active(origin.timestamp) { return self.try_read_channel_at_index(0).map(Some); } @@ -150,8 +136,9 @@ where } } - /// Pulls the next piece of data from the channel bank. Note that it attempts to pull data out of the channel bank prior to - /// loading data in (unlike most other stages). This is to ensure maintain consistency around channel bank pruning which depends upon the order + /// Pulls the next piece of data from the channel bank. Note that it attempts to pull data out + /// of the channel bank prior to loading data in (unlike most other stages). This is to + /// ensure maintain consistency around channel bank pruning which depends upon the order /// of operations. pub async fn next_data(&mut self) -> StageResult> { match self.read() { @@ -170,15 +157,12 @@ where Err(StageError::NotEnoughData) } - /// Attempts to read the channel at the specified index. If the channel is not ready or timed out, - /// it will return an error. + /// Attempts to read the channel at the specified index. If the channel is not ready or timed + /// out, it will return an error. /// If the channel read was successful, it will remove the channel from the channel queue. 
fn try_read_channel_at_index(&mut self, index: usize) -> StageResult { let channel_id = self.channel_queue[index]; - let channel = self - .channels - .get(&channel_id) - .ok_or(anyhow!("Channel not found"))?; + let channel = self.channels.get(&channel_id).ok_or(anyhow!("Channel not found"))?; let origin = self.origin().ok_or(anyhow!("No origin present"))?; let timed_out = channel.open_block_number() + self.cfg.channel_timeout < origin.number; @@ -210,10 +194,13 @@ where #[cfg(test)] mod tests { use super::*; - use crate::stages::frame_queue::tests::new_test_frames; - use crate::stages::l1_retrieval::L1Retrieval; - use crate::stages::l1_traversal::tests::new_test_traversal; - use crate::traits::test_utils::TestDAP; + use crate::{ + stages::{ + frame_queue::tests::new_test_frames, l1_retrieval::L1Retrieval, + l1_traversal::tests::new_test_traversal, + }, + traits::test_utils::TestDAP, + }; use alloc::vec; #[test] diff --git a/crates/derive/src/stages/channel_reader.rs b/crates/derive/src/stages/channel_reader.rs index abebafbda..8ebea8ff1 100644 --- a/crates/derive/src/stages/channel_reader.rs +++ b/crates/derive/src/stages/channel_reader.rs @@ -30,10 +30,7 @@ where { /// Create a new [ChannelReader] stage. pub fn new(prev: ChannelBank) -> Self { - Self { - prev, - next_batch: None, - } + Self { prev, next_batch: None } } /// Pulls out the next Batch from the available channel. 
@@ -114,21 +111,13 @@ impl BatchReader { impl From<&[u8]> for BatchReader { fn from(data: &[u8]) -> Self { - Self { - data: Some(data.to_vec()), - decompressed: Vec::new(), - cursor: 0, - } + Self { data: Some(data.to_vec()), decompressed: Vec::new(), cursor: 0 } } } impl From> for BatchReader { fn from(data: Vec) -> Self { - Self { - data: Some(data), - decompressed: Vec::new(), - cursor: 0, - } + Self { data: Some(data), decompressed: Vec::new(), cursor: 0 } } } diff --git a/crates/derive/src/stages/frame_queue.rs b/crates/derive/src/stages/frame_queue.rs index 51221eb63..a7e219aa3 100644 --- a/crates/derive/src/stages/frame_queue.rs +++ b/crates/derive/src/stages/frame_queue.rs @@ -31,10 +31,7 @@ where { /// Create a new frame queue stage. pub fn new(prev: L1Retrieval) -> Self { - Self { - prev, - queue: VecDeque::new(), - } + Self { prev, queue: VecDeque::new() } } /// Returns the L1 origin [BlockInfo]. @@ -62,9 +59,7 @@ where return Err(anyhow!("Not enough data").into()); } - self.queue - .pop_front() - .ok_or_else(|| anyhow!("Frame queue is impossibly empty.").into()) + self.queue.pop_front().ok_or_else(|| anyhow!("Frame queue is impossibly empty.").into()) } } @@ -83,11 +78,11 @@ where #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::stages::l1_traversal::tests::new_test_traversal; - use crate::traits::test_utils::TestDAP; - use crate::DERIVATION_VERSION_0; - use alloc::vec; - use alloc::vec::Vec; + use crate::{ + stages::l1_traversal::tests::new_test_traversal, traits::test_utils::TestDAP, + DERIVATION_VERSION_0, + }; + use alloc::{vec, vec::Vec}; use alloy_primitives::Bytes; pub(crate) fn new_test_frames(count: usize) -> Vec { @@ -159,9 +154,7 @@ pub(crate) mod tests { async fn test_frame_queue_single_frame() { let data = new_encoded_test_frames(1); let traversal = new_test_traversal(true, true); - let dap = TestDAP { - results: vec![Ok(data)], - }; + let dap = TestDAP { results: vec![Ok(data)] }; let retrieval = L1Retrieval::new(traversal, 
dap); let mut frame_queue = FrameQueue::new(retrieval); let frame_decoded = frame_queue.next_frame().await.unwrap(); @@ -175,9 +168,7 @@ pub(crate) mod tests { async fn test_frame_queue_multiple_frames() { let data = new_encoded_test_frames(3); let traversal = new_test_traversal(true, true); - let dap = TestDAP { - results: vec![Ok(data)], - }; + let dap = TestDAP { results: vec![Ok(data)] }; let retrieval = L1Retrieval::new(traversal, dap); let mut frame_queue = FrameQueue::new(retrieval); for i in 0..3 { diff --git a/crates/derive/src/stages/l1_retrieval.rs b/crates/derive/src/stages/l1_retrieval.rs index c69dea9c3..df485a5b9 100644 --- a/crates/derive/src/stages/l1_retrieval.rs +++ b/crates/derive/src/stages/l1_retrieval.rs @@ -30,13 +30,10 @@ where DAP: DataAvailabilityProvider, CP: ChainProvider, { - /// Creates a new L1 retrieval stage with the given data availability provider and previous stage. + /// Creates a new L1 retrieval stage with the given data availability provider and previous + /// stage. pub fn new(prev: L1Traversal, provider: DAP) -> Self { - Self { - prev, - provider, - data: None, - } + Self { prev, provider, data: None } } /// Returns the current L1 block in the traversal stage, if it exists. @@ -53,11 +50,8 @@ where .prev .next_l1_block()? 
.ok_or_else(|| anyhow!("No block to retrieve data from"))?; - self.data = Some( - self.provider - .open_data(&next, self.prev.system_config.batcher_addr) - .await?, - ); + self.data = + Some(self.provider.open_data(&next, self.prev.system_config.batcher_addr).await?); } let data = self.data.as_mut().expect("Cannot be None").next(); @@ -87,8 +81,10 @@ where #[cfg(test)] mod tests { use super::*; - use crate::stages::l1_traversal::tests::new_test_traversal; - use crate::traits::test_utils::{TestDAP, TestIter}; + use crate::{ + stages::l1_traversal::tests::new_test_traversal, + traits::test_utils::{TestDAP, TestIter}, + }; use alloc::vec; use alloy_primitives::Address; @@ -132,11 +128,7 @@ mod tests { // (traversal) is called in the retrieval stage. let traversal = new_test_traversal(false, false); let dap = TestDAP { results: vec![] }; - let mut retrieval = L1Retrieval { - prev: traversal, - provider: dap, - data: Some(data), - }; + let mut retrieval = L1Retrieval { prev: traversal, provider: dap, data: Some(data) }; let data = retrieval.next_data().await.unwrap(); assert_eq!(data, Bytes::default()); assert!(retrieval.data.is_some()); @@ -152,11 +144,7 @@ mod tests { }; let traversal = new_test_traversal(true, true); let dap = TestDAP { results: vec![] }; - let mut retrieval = L1Retrieval { - prev: traversal, - provider: dap, - data: Some(data), - }; + let mut retrieval = L1Retrieval { prev: traversal, provider: dap, data: Some(data) }; let data = retrieval.next_data().await.unwrap_err(); assert_eq!(data, StageError::Eof); assert!(retrieval.data.is_none()); diff --git a/crates/derive/src/stages/l1_traversal.rs b/crates/derive/src/stages/l1_traversal.rs index 4e05f7671..d2ca0f962 100644 --- a/crates/derive/src/stages/l1_traversal.rs +++ b/crates/derive/src/stages/l1_traversal.rs @@ -4,8 +4,7 @@ use crate::{ traits::{ChainProvider, ResettableStage}, types::{BlockInfo, RollupConfig, StageError, StageResult, SystemConfig}, }; -use alloc::boxed::Box; -use 
alloc::sync::Arc; +use alloc::{boxed::Box, sync::Arc}; use anyhow::anyhow; use async_trait::async_trait; @@ -63,10 +62,7 @@ impl L1Traversal { // Pull the next block or return EOF which has special // handling further up the pipeline. let block = self.block.ok_or(StageError::Eof)?; - let next_l1_origin = self - .data_source - .block_info_by_number(block.number + 1) - .await?; + let next_l1_origin = self.data_source.block_info_by_number(block.number + 1).await?; // Check for reorgs if block.hash != next_l1_origin.parent_hash { @@ -79,10 +75,7 @@ impl L1Traversal { } // Fetch receipts. - let receipts = self - .data_source - .receipts_by_hash(next_l1_origin.hash) - .await?; + let receipts = self.data_source.receipts_by_hash(next_l1_origin.hash).await?; self.system_config.update_with_receipts( receipts.as_slice(), &self.rollup_config, @@ -108,8 +101,10 @@ impl ResettableStage for L1Traversal { #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::traits::test_utils::TestChainProvider; - use crate::types::{Receipt, CONFIG_UPDATE_EVENT_VERSION_0, CONFIG_UPDATE_TOPIC}; + use crate::{ + traits::test_utils::TestChainProvider, + types::{Receipt, CONFIG_UPDATE_EVENT_VERSION_0, CONFIG_UPDATE_TOPIC}, + }; use alloc::vec; use alloy_primitives::{address, b256, hex, Address, Bytes, Log, LogData, B256}; @@ -146,10 +141,7 @@ pub(crate) mod tests { provider.insert_block(1, block); } if receipts { - let mut receipt = Receipt { - success: true, - ..Receipt::default() - }; + let mut receipt = Receipt { success: true, ..Receipt::default() }; let bad = Log::new( Address::from([2; 20]), vec![CONFIG_UPDATE_TOPIC, B256::default()], @@ -166,10 +158,7 @@ pub(crate) mod tests { #[tokio::test] async fn test_l1_traversal() { let mut traversal = new_test_traversal(true, true); - assert_eq!( - traversal.next_l1_block().unwrap(), - Some(BlockInfo::default()) - ); + assert_eq!(traversal.next_l1_block().unwrap(), Some(BlockInfo::default())); assert_eq!(traversal.next_l1_block().unwrap_err(), 
StageError::Eof); assert!(traversal.advance_l1_block().await.is_ok()); } @@ -177,38 +166,23 @@ pub(crate) mod tests { #[tokio::test] async fn test_l1_traversal_missing_receipts() { let mut traversal = new_test_traversal(true, false); - assert_eq!( - traversal.next_l1_block().unwrap(), - Some(BlockInfo::default()) - ); + assert_eq!(traversal.next_l1_block().unwrap(), Some(BlockInfo::default())); assert_eq!(traversal.next_l1_block().unwrap_err(), StageError::Eof); - matches!( - traversal.advance_l1_block().await.unwrap_err(), - StageError::Custom(_) - ); + matches!(traversal.advance_l1_block().await.unwrap_err(), StageError::Custom(_)); } #[tokio::test] async fn test_l1_traversal_missing_blocks() { let mut traversal = new_test_traversal(false, false); - assert_eq!( - traversal.next_l1_block().unwrap(), - Some(BlockInfo::default()) - ); + assert_eq!(traversal.next_l1_block().unwrap(), Some(BlockInfo::default())); assert_eq!(traversal.next_l1_block().unwrap_err(), StageError::Eof); - matches!( - traversal.advance_l1_block().await.unwrap_err(), - StageError::Custom(_) - ); + matches!(traversal.advance_l1_block().await.unwrap_err(), StageError::Custom(_)); } #[tokio::test] async fn test_system_config_updated() { let mut traversal = new_test_traversal(true, true); - assert_eq!( - traversal.next_l1_block().unwrap(), - Some(BlockInfo::default()) - ); + assert_eq!(traversal.next_l1_block().unwrap(), Some(BlockInfo::default())); assert_eq!(traversal.next_l1_block().unwrap_err(), StageError::Eof); assert!(traversal.advance_l1_block().await.is_ok()); let expected = address!("000000000000000000000000000000000000bEEF"); diff --git a/crates/derive/src/stages/mod.rs b/crates/derive/src/stages/mod.rs index 761995d9b..1557d9cbe 100644 --- a/crates/derive/src/stages/mod.rs +++ b/crates/derive/src/stages/mod.rs @@ -1,6 +1,6 @@ -//! This module contains each stage of the derivation pipeline, and offers a high-level API to functionally -//! 
apply each stage's output as an input to the next stage, until finally arriving at the produced execution -//! payloads. +//! This module contains each stage of the derivation pipeline, and offers a high-level API to +//! functionally apply each stage's output as an input to the next stage, until finally arriving at +//! the produced execution payloads. //! //! **Stages:** //! 1. L1 Traversal diff --git a/crates/derive/src/traits/data_sources.rs b/crates/derive/src/traits/data_sources.rs index a1521ce21..5721e00be 100644 --- a/crates/derive/src/traits/data_sources.rs +++ b/crates/derive/src/traits/data_sources.rs @@ -1,4 +1,5 @@ -//! Contains traits that describe the functionality of various data sources used in the derivation pipeline's stages. +//! Contains traits that describe the functionality of various data sources used in the derivation +//! pipeline's stages. use crate::types::{BlockInfo, Receipt, StageResult}; use alloc::{boxed::Box, vec::Vec}; @@ -10,11 +11,12 @@ use core::fmt::Debug; /// Describes the functionality of a data source that can provide information from the blockchain. #[async_trait] pub trait ChainProvider { - /// Returns the block at the given number, or an error if the block does not exist in the data source. + /// Returns the block at the given number, or an error if the block does not exist in the data + /// source. async fn block_info_by_number(&self, number: u64) -> Result; - /// Returns all receipts in the block with the given hash, or an error if the block does not exist in the data - /// source. + /// Returns all receipts in the block with the given hash, or an error if the block does not + /// exist in the data source. async fn receipts_by_hash(&self, hash: B256) -> Result>; } @@ -24,8 +26,8 @@ pub trait DataAvailabilityProvider { /// An iterator over returned bytes data. 
type DataIter: DataIter + Send + Debug; - /// Returns the data availability for the block with the given hash, or an error if the block does not exist in the - /// data source. + /// Returns the data availability for the block with the given hash, or an error if the block + /// does not exist in the data source. async fn open_data( &self, block_ref: &BlockInfo, @@ -35,6 +37,7 @@ pub trait DataAvailabilityProvider { /// Describes the behavior of a data iterator. pub trait DataIter { - /// Returns the next item in the iterator, or [crate::types::StageError::Eof] if the iterator is exhausted. + /// Returns the next item in the iterator, or [crate::types::StageError::Eof] if the iterator is + /// exhausted. fn next(&mut self) -> StageResult; } diff --git a/crates/derive/src/traits/mod.rs b/crates/derive/src/traits/mod.rs index cd1d1ab9f..a9993a70b 100644 --- a/crates/derive/src/traits/mod.rs +++ b/crates/derive/src/traits/mod.rs @@ -1,4 +1,5 @@ -//! This module contains all of the traits describing functionality of portions of the derivation pipeline. +//! This module contains all of the traits describing functionality of portions of the derivation +//! pipeline. 
mod data_sources; pub use data_sources::{ChainProvider, DataAvailabilityProvider, DataIter}; diff --git a/crates/derive/src/traits/test_utils/data_availability.rs b/crates/derive/src/traits/test_utils/data_availability.rs index 9f21378ea..ffb7071ef 100644 --- a/crates/derive/src/traits/test_utils/data_availability.rs +++ b/crates/derive/src/traits/test_utils/data_availability.rs @@ -50,9 +50,6 @@ impl DataAvailabilityProvider for TestDAP { Err(_) => Err(StageError::Eof), }) .collect::>>(); - Ok(TestIter { - open_data_calls: vec![(*block_ref, batcher_address)], - results, - }) + Ok(TestIter { open_data_calls: vec![(*block_ref, batcher_address)], results }) } } diff --git a/crates/derive/src/traits/test_utils/data_sources.rs b/crates/derive/src/traits/test_utils/data_sources.rs index 02518b44e..13c23179a 100644 --- a/crates/derive/src/traits/test_utils/data_sources.rs +++ b/crates/derive/src/traits/test_utils/data_sources.rs @@ -1,7 +1,9 @@ //! Data Sources Test Utilities -use crate::traits::ChainProvider; -use crate::types::{BlockInfo, Receipt}; +use crate::{ + traits::ChainProvider, + types::{BlockInfo, Receipt}, +}; use alloc::{boxed::Box, vec::Vec}; use alloy_primitives::B256; use anyhow::Result; diff --git a/crates/derive/src/types/alloy/eips/eip1559/helpers.rs b/crates/derive/src/types/alloy/eips/eip1559/helpers.rs index 6987c4de7..82e34001c 100644 --- a/crates/derive/src/types/alloy/eips/eip1559/helpers.rs +++ b/crates/derive/src/types/alloy/eips/eip1559/helpers.rs @@ -40,12 +40,12 @@ pub fn calc_next_block_base_fee( // increased base fee. core::cmp::Ordering::Greater => { // Calculate the increase in base fee based on the formula defined by EIP-1559. - base_fee - + (core::cmp::max( + base_fee + + (core::cmp::max( // Ensure a minimum increase of 1. 
1, - base_fee as u128 * (gas_used - gas_target) as u128 - / (gas_target as u128 * base_fee_params.max_change_denominator as u128), + base_fee as u128 * (gas_used - gas_target) as u128 / + (gas_target as u128 * base_fee_params.max_change_denominator as u128), ) as u64) } // If the gas used in the current block is less than the gas target, calculate a new @@ -53,8 +53,8 @@ pub fn calc_next_block_base_fee( core::cmp::Ordering::Less => { // Calculate the decrease in base fee based on the formula defined by EIP-1559. base_fee.saturating_sub( - (base_fee as u128 * (gas_target - gas_used) as u128 - / (gas_target as u128 * base_fee_params.max_change_denominator as u128)) + (base_fee as u128 * (gas_target - gas_used) as u128 / + (gas_target as u128 * base_fee_params.max_change_denominator as u128)) as u64, ) } @@ -69,10 +69,7 @@ mod tests { #[test] fn min_protocol_sanity() { - assert_eq!( - MIN_PROTOCOL_BASE_FEE_U256.to::(), - MIN_PROTOCOL_BASE_FEE - ); + assert_eq!(MIN_PROTOCOL_BASE_FEE_U256.to::(), MIN_PROTOCOL_BASE_FEE); } #[test] diff --git a/crates/derive/src/types/alloy/eips/eip2718.rs b/crates/derive/src/types/alloy/eips/eip2718.rs index 55e3b1ed1..e7284e66d 100644 --- a/crates/derive/src/types/alloy/eips/eip2718.rs +++ b/crates/derive/src/types/alloy/eips/eip2718.rs @@ -151,11 +151,7 @@ pub trait Encodable2718: Sized + Send + Sync + 'static { /// simply the legacy encoding. 
fn network_encode(&self, out: &mut dyn BufMut) { if !self.is_legacy() { - Header { - list: false, - payload_length: self.encode_2718_len(), - } - .encode(out); + Header { list: false, payload_length: self.encode_2718_len() }.encode(out); } self.encode_2718(out); diff --git a/crates/derive/src/types/alloy/eips/eip2930.rs b/crates/derive/src/types/alloy/eips/eip2930.rs index 8e0dc1608..85e59fb07 100644 --- a/crates/derive/src/types/alloy/eips/eip2930.rs +++ b/crates/derive/src/types/alloy/eips/eip2930.rs @@ -47,10 +47,7 @@ impl AccessList { self.0.into_iter().map(|item| { ( item.address, - item.storage_keys - .into_iter() - .map(|slot| U256::from_be_bytes(slot.0)) - .collect(), + item.storage_keys.into_iter().map(|slot| U256::from_be_bytes(slot.0)).collect(), ) }) } @@ -60,10 +57,7 @@ impl AccessList { self.0.iter().map(|item| { ( item.address, - item.storage_keys - .iter() - .map(|slot| U256::from_be_bytes(slot.0)) - .collect(), + item.storage_keys.iter().map(|slot| U256::from_be_bytes(slot.0)).collect(), ) }) } @@ -72,7 +66,7 @@ impl AccessList { #[inline] pub fn size(&self) -> usize { // take into account capacity - self.0.iter().map(AccessListItem::size).sum::() - + self.0.capacity() * mem::size_of::() + self.0.iter().map(AccessListItem::size).sum::() + + self.0.capacity() * mem::size_of::() } } diff --git a/crates/derive/src/types/alloy/eips/eip4844.rs b/crates/derive/src/types/alloy/eips/eip4844.rs index e9694038b..a27c2005f 100644 --- a/crates/derive/src/types/alloy/eips/eip4844.rs +++ b/crates/derive/src/types/alloy/eips/eip4844.rs @@ -99,21 +99,9 @@ mod tests { (0, TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB, 0), // If the target blob gas is exceeded, the excessBlobGas should increase // by however much it was overshot - ( - 0, - (TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB) + 1, - DATA_GAS_PER_BLOB, - ), - ( - 1, - (TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB) + 1, - DATA_GAS_PER_BLOB + 1, - ), - ( - 1, - (TARGET_DATA_GAS_PER_BLOCK / 
DATA_GAS_PER_BLOB) + 2, - 2 * DATA_GAS_PER_BLOB + 1, - ), + (0, (TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB) + 1, DATA_GAS_PER_BLOB), + (1, (TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB) + 1, DATA_GAS_PER_BLOB + 1), + (1, (TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB) + 2, 2 * DATA_GAS_PER_BLOB + 1), // The excess blob gas should decrease by however much the target was // under-shot, capped at zero. ( @@ -131,11 +119,7 @@ mod tests { (TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB) - 2, TARGET_DATA_GAS_PER_BLOCK - (2 * DATA_GAS_PER_BLOB), ), - ( - DATA_GAS_PER_BLOB - 1, - (TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB) - 1, - 0, - ), + (DATA_GAS_PER_BLOB - 1, (TARGET_DATA_GAS_PER_BLOCK / DATA_GAS_PER_BLOB) - 1, 0), ] { let actual = calc_excess_blob_gas(excess, blobs * DATA_GAS_PER_BLOB); assert_eq!(actual, expected, "test: {t:?}"); diff --git a/crates/derive/src/types/alloy/header.rs b/crates/derive/src/types/alloy/header.rs index e78955035..f03ebfafd 100644 --- a/crates/derive/src/types/alloy/header.rs +++ b/crates/derive/src/types/alloy/header.rs @@ -206,10 +206,7 @@ impl Header { /// /// Returns a `None` if no excess blob gas is set, no EIP-4844 support pub fn next_block_excess_blob_gas(&self) -> Option { - Some(calc_excess_blob_gas( - self.excess_blob_gas?, - self.blob_gas_used?, - )) + Some(calc_excess_blob_gas(self.excess_blob_gas?, self.blob_gas_used?)) } /// Calculate a heuristic for the in-memory size of the [Header]. 
@@ -257,19 +254,19 @@ impl Header { if let Some(base_fee) = self.base_fee_per_gas { length += U256::from(base_fee).length(); - } else if self.withdrawals_root.is_some() - || self.blob_gas_used.is_some() - || self.excess_blob_gas.is_some() - || self.parent_beacon_block_root.is_some() + } else if self.withdrawals_root.is_some() || + self.blob_gas_used.is_some() || + self.excess_blob_gas.is_some() || + self.parent_beacon_block_root.is_some() { length += 1; // EMPTY LIST CODE } if let Some(root) = self.withdrawals_root { length += root.length(); - } else if self.blob_gas_used.is_some() - || self.excess_blob_gas.is_some() - || self.parent_beacon_block_root.is_some() + } else if self.blob_gas_used.is_some() || + self.excess_blob_gas.is_some() || + self.parent_beacon_block_root.is_some() { length += 1; // EMPTY STRING CODE } @@ -303,10 +300,8 @@ impl Header { impl Encodable for Header { fn encode(&self, out: &mut dyn BufMut) { - let list_header = alloy_rlp::Header { - list: true, - payload_length: self.header_payload_length(), - }; + let list_header = + alloy_rlp::Header { list: true, payload_length: self.header_payload_length() }; list_header.encode(out); self.parent_hash.encode(out); self.ommers_hash.encode(out); @@ -328,10 +323,10 @@ impl Encodable for Header { // but withdrawals root is present. if let Some(ref base_fee) = self.base_fee_per_gas { U256::from(*base_fee).encode(out); - } else if self.withdrawals_root.is_some() - || self.blob_gas_used.is_some() - || self.excess_blob_gas.is_some() - || self.parent_beacon_block_root.is_some() + } else if self.withdrawals_root.is_some() || + self.blob_gas_used.is_some() || + self.excess_blob_gas.is_some() || + self.parent_beacon_block_root.is_some() { out.put_u8(EMPTY_LIST_CODE); } @@ -340,9 +335,9 @@ impl Encodable for Header { // but blob gas used is present. 
if let Some(ref root) = self.withdrawals_root { root.encode(out); - } else if self.blob_gas_used.is_some() - || self.excess_blob_gas.is_some() - || self.parent_beacon_block_root.is_some() + } else if self.blob_gas_used.is_some() || + self.excess_blob_gas.is_some() || + self.parent_beacon_block_root.is_some() { out.put_u8(EMPTY_STRING_CODE); } @@ -414,11 +409,7 @@ impl Decodable for Header { }; if started_len - buf.len() < rlp_head.payload_length { - if buf - .first() - .map(|b| *b == EMPTY_LIST_CODE) - .unwrap_or_default() - { + if buf.first().map(|b| *b == EMPTY_LIST_CODE).unwrap_or_default() { buf.advance(1) } else { this.base_fee_per_gas = Some(U256::decode(buf)?.to::()); @@ -427,11 +418,7 @@ impl Decodable for Header { // Withdrawals root for post-shanghai headers if started_len - buf.len() < rlp_head.payload_length { - if buf - .first() - .map(|b| *b == EMPTY_STRING_CODE) - .unwrap_or_default() - { + if buf.first().map(|b| *b == EMPTY_STRING_CODE).unwrap_or_default() { buf.advance(1) } else { this.withdrawals_root = Some(Decodable::decode(buf)?); @@ -440,11 +427,7 @@ impl Decodable for Header { // Blob gas used and excess blob gas for post-cancun headers if started_len - buf.len() < rlp_head.payload_length { - if buf - .first() - .map(|b| *b == EMPTY_LIST_CODE) - .unwrap_or_default() - { + if buf.first().map(|b| *b == EMPTY_LIST_CODE).unwrap_or_default() { buf.advance(1) } else { this.blob_gas_used = Some(U256::decode(buf)?.to::()); @@ -452,11 +435,7 @@ impl Decodable for Header { } if started_len - buf.len() < rlp_head.payload_length { - if buf - .first() - .map(|b| *b == EMPTY_LIST_CODE) - .unwrap_or_default() - { + if buf.first().map(|b| *b == EMPTY_LIST_CODE).unwrap_or_default() { buf.advance(1) } else { this.excess_blob_gas = Some(U256::decode(buf)?.to::()); diff --git a/crates/derive/src/types/alloy/mod.rs b/crates/derive/src/types/alloy/mod.rs index 379d49645..3686f7bcc 100644 --- a/crates/derive/src/types/alloy/mod.rs +++ 
b/crates/derive/src/types/alloy/mod.rs @@ -1,4 +1,5 @@ -//! This module contains `alloy` types that have been ported from various alloy crates to support `no_std`. +//! This module contains `alloy` types that have been ported from various alloy crates to support +//! `no_std`. mod transaction; pub use transaction::{TxDeposit, TxEip1559, TxEip2930, TxEip4844, TxEnvelope, TxLegacy, TxType}; diff --git a/crates/derive/src/types/alloy/network/transaction/signed.rs b/crates/derive/src/types/alloy/network/transaction/signed.rs index 761cfc830..eaf7dd0a7 100644 --- a/crates/derive/src/types/alloy/network/transaction/signed.rs +++ b/crates/derive/src/types/alloy/network/transaction/signed.rs @@ -39,11 +39,7 @@ impl Signed { impl Signed { /// Instantiate from a transaction and signature. Does not verify the signature. pub const fn new_unchecked(tx: T, signature: Signature, hash: B256) -> Self { - Self { - tx, - signature, - hash, - } + Self { tx, signature, hash } } /// Calculate the signing hash for the transaction. diff --git a/crates/derive/src/types/alloy/receipt.rs b/crates/derive/src/types/alloy/receipt.rs index 81f48b7dc..65b0a20e5 100644 --- a/crates/derive/src/types/alloy/receipt.rs +++ b/crates/derive/src/types/alloy/receipt.rs @@ -82,10 +82,10 @@ impl ReceiptWithBloom { } fn payload_len(&self) -> usize { - let mut payload_len = self.receipt.success.length() - + self.receipt.cumulative_gas_used.length() - + self.bloom.length() - + self.receipt.logs.len(); + let mut payload_len = self.receipt.success.length() + + self.receipt.cumulative_gas_used.length() + + self.bloom.length() + + self.receipt.logs.len(); if self.receipt.tx_type == TxType::Deposit { if let Some(deposit_nonce) = self.receipt.deposit_nonce { payload_len += deposit_nonce.length(); @@ -99,10 +99,7 @@ impl ReceiptWithBloom { /// Returns the rlp header for the receipt payload. 
fn receipt_rlp_header(&self) -> alloy_rlp::Header { - alloy_rlp::Header { - list: true, - payload_length: self.payload_len(), - } + alloy_rlp::Header { list: true, payload_length: self.payload_len() } } /// Encodes the receipt data. @@ -134,10 +131,7 @@ impl ReceiptWithBloom { if with_header { let payload_length = payload.len() + 1; - let header = alloy_rlp::Header { - list: false, - payload_length, - }; + let header = alloy_rlp::Header { list: false, payload_length }; header.encode(out); } @@ -177,12 +171,10 @@ impl ReceiptWithBloom { let receipt = match tx_type { TxType::Deposit => { let remaining = |b: &[u8]| rlp_head.payload_length - (started_len - b.len()) > 0; - let deposit_nonce = remaining(b) - .then(|| alloy_rlp::Decodable::decode(b)) - .transpose()?; - let deposit_receipt_version = remaining(b) - .then(|| alloy_rlp::Decodable::decode(b)) - .transpose()?; + let deposit_nonce = + remaining(b).then(|| alloy_rlp::Decodable::decode(b)).transpose()?; + let deposit_receipt_version = + remaining(b).then(|| alloy_rlp::Decodable::decode(b)).transpose()?; Receipt { tx_type, @@ -240,9 +232,9 @@ impl alloy_rlp::Decodable for ReceiptWithBloom { // a receipt is either encoded as a string (non legacy) or a list (legacy). // We should not consume the buffer if we are decoding a legacy receipt, so let's // check if the first byte is between 0x80 and 0xbf. 
- let rlp_type = *buf.first().ok_or(alloy_rlp::Error::Custom( - "cannot decode a receipt from empty bytes", - ))?; + let rlp_type = *buf + .first() + .ok_or(alloy_rlp::Error::Custom("cannot decode a receipt from empty bytes"))?; match rlp_type.cmp(&alloy_rlp::EMPTY_LIST_CODE) { Ordering::Less => { @@ -271,9 +263,9 @@ impl alloy_rlp::Decodable for ReceiptWithBloom { _ => Err(alloy_rlp::Error::Custom("invalid receipt type")), } } - Ordering::Equal => Err(alloy_rlp::Error::Custom( - "an empty list is not a valid receipt encoding", - )), + Ordering::Equal => { + Err(alloy_rlp::Error::Custom("an empty list is not a valid receipt encoding")) + } Ordering::Greater => Self::decode_receipt(buf, TxType::Legacy), } } diff --git a/crates/derive/src/types/alloy/transaction/deposit.rs b/crates/derive/src/types/alloy/transaction/deposit.rs index 763c388a0..5a6a873b6 100644 --- a/crates/derive/src/types/alloy/transaction/deposit.rs +++ b/crates/derive/src/types/alloy/transaction/deposit.rs @@ -78,14 +78,14 @@ impl TxDeposit { /// Outputs the length of the transaction's fields, without a RLP header or length of the /// eip155 fields. pub(crate) fn fields_len(&self) -> usize { - self.source_hash.length() - + self.from.length() - + self.to.length() - + self.mint.map_or(1, |mint| mint.length()) - + self.value.length() - + self.gas_limit.length() - + self.is_system_transaction.length() - + self.input.0.length() + self.source_hash.length() + + self.from.length() + + self.to.length() + + self.mint.map_or(1, |mint| mint.length()) + + self.value.length() + + self.gas_limit.length() + + self.is_system_transaction.length() + + self.input.0.length() } /// Encodes only the transaction's fields into the desired buffer, without a RLP header. @@ -112,10 +112,7 @@ impl TxDeposit { /// just the header and transaction rlp. 
pub(crate) fn encode_with_signature(&self, _: &Signature, out: &mut dyn alloy_rlp::BufMut) { let payload_length = self.fields_len(); - let header = Header { - list: true, - payload_length, - }; + let header = Header { list: true, payload_length }; header.encode(out); self.encode_fields(out); } @@ -144,11 +141,7 @@ impl TxDeposit { impl Encodable for TxDeposit { fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { - Header { - list: true, - payload_length: self.fields_len(), - } - .encode(out); + Header { list: true, payload_length: self.fields_len() }.encode(out); self.encode_fields(out); } @@ -176,11 +169,7 @@ impl Transaction for TxDeposit { fn encode_for_signing(&self, out: &mut dyn alloy_rlp::BufMut) { out.put_u8(self.tx_type() as u8); - Header { - list: true, - payload_length: self.fields_len(), - } - .encode(out); + Header { list: true, payload_length: self.fields_len() }.encode(out); self.encode_fields(out); } diff --git a/crates/derive/src/types/alloy/transaction/eip1559.rs b/crates/derive/src/types/alloy/transaction/eip1559.rs index b80dc55ec..7ebb3a8b3 100644 --- a/crates/derive/src/types/alloy/transaction/eip1559.rs +++ b/crates/derive/src/types/alloy/transaction/eip1559.rs @@ -142,10 +142,7 @@ impl TxEip1559 { out: &mut dyn alloy_rlp::BufMut, ) { let payload_length = self.fields_len() + signature.rlp_vrs_len(); - let header = Header { - list: true, - payload_length, - }; + let header = Header { list: true, payload_length }; header.encode(out); self.encode_fields(out); signature.write_rlp_vrs(out); @@ -186,11 +183,7 @@ impl TxEip1559 { impl Encodable for TxEip1559 { fn encode(&self, out: &mut dyn BufMut) { - Header { - list: true, - payload_length: self.fields_len(), - } - .encode(out); + Header { list: true, payload_length: self.fields_len() }.encode(out); self.encode_fields(out); } @@ -218,11 +211,7 @@ impl Transaction for TxEip1559 { fn encode_for_signing(&self, out: &mut dyn alloy_rlp::BufMut) { out.put_u8(self.tx_type() as u8); - Header { - list: 
true, - payload_length: self.fields_len(), - } - .encode(out); + Header { list: true, payload_length: self.fields_len() }.encode(out); self.encode_fields(out); } @@ -370,10 +359,6 @@ mod tests { let signed_tx = tx.into_signed(sig); assert_eq!(*signed_tx.hash(), hash, "Expected same hash"); - assert_eq!( - signed_tx.recover_signer().unwrap(), - signer, - "Recovering signer should pass." - ); + assert_eq!(signed_tx.recover_signer().unwrap(), signer, "Recovering signer should pass."); } } diff --git a/crates/derive/src/types/alloy/transaction/eip2930.rs b/crates/derive/src/types/alloy/transaction/eip2930.rs index 2c894029a..7f617739e 100644 --- a/crates/derive/src/types/alloy/transaction/eip2930.rs +++ b/crates/derive/src/types/alloy/transaction/eip2930.rs @@ -117,10 +117,7 @@ impl TxEip2930 { /// hash that for eip2718 does not require rlp header pub(crate) fn encode_with_signature(&self, signature: &Signature, out: &mut dyn BufMut) { let payload_length = self.fields_len() + signature.rlp_vrs_len(); - let header = Header { - list: true, - payload_length, - }; + let header = Header { list: true, payload_length }; header.encode(out); self.encode_fields(out); signature.write_rlp_vrs(out); @@ -147,11 +144,7 @@ impl TxEip2930 { impl Encodable for TxEip2930 { fn encode(&self, out: &mut dyn BufMut) { - Header { - list: true, - payload_length: self.fields_len(), - } - .encode(out); + Header { list: true, payload_length: self.fields_len() }.encode(out); self.encode_fields(out); } @@ -180,11 +173,7 @@ impl Transaction for TxEip2930 { fn encode_for_signing(&self, out: &mut dyn BufMut) { out.put_u8(self.tx_type() as u8); - Header { - list: true, - payload_length: self.fields_len(), - } - .encode(out); + Header { list: true, payload_length: self.fields_len() }.encode(out); self.encode_fields(out); } diff --git a/crates/derive/src/types/alloy/transaction/eip4844.rs b/crates/derive/src/types/alloy/transaction/eip4844.rs index 06f457513..c88652317 100644 --- 
a/crates/derive/src/types/alloy/transaction/eip4844.rs +++ b/crates/derive/src/types/alloy/transaction/eip4844.rs @@ -195,10 +195,7 @@ impl TxEip4844 { .encode(out); } out.put_u8(self.tx_type() as u8); - let header = Header { - list: true, - payload_length, - }; + let header = Header { list: true, payload_length }; header.encode(out); self.encode_fields(out); signature.encode(out); @@ -231,11 +228,7 @@ impl TxEip4844 { /// Note that there is no rlp header before the transaction type byte. pub fn encode_for_signing(&self, out: &mut dyn BufMut) { out.put_u8(self.tx_type() as u8); - Header { - list: true, - payload_length: self.fields_len(), - } - .encode(out); + Header { list: true, payload_length: self.fields_len() }.encode(out); self.encode_fields(out); } diff --git a/crates/derive/src/types/alloy/transaction/envelope.rs b/crates/derive/src/types/alloy/transaction/envelope.rs index ba18f225a..528e5d225 100644 --- a/crates/derive/src/types/alloy/transaction/envelope.rs +++ b/crates/derive/src/types/alloy/transaction/envelope.rs @@ -140,28 +140,26 @@ impl Decodable for TxEnvelope { impl Decodable2718 for TxEnvelope { fn typed_decode(ty: u8, buf: &mut &[u8]) -> Result { match ty.try_into()? 
{ - TxType::Legacy => Ok(Self::TaggedLegacy( - Decodable::decode(buf).map_err(Eip2718Error::RlpError)?, - )), - TxType::Eip2930 => Ok(Self::Eip2930( - Decodable::decode(buf).map_err(Eip2718Error::RlpError)?, - )), - TxType::Eip1559 => Ok(Self::Eip1559( - Decodable::decode(buf).map_err(Eip2718Error::RlpError)?, - )), - TxType::Eip4844 => Ok(Self::Eip4844( - Decodable::decode(buf).map_err(Eip2718Error::RlpError)?, - )), - TxType::Deposit => Ok(Self::Deposit( - Decodable::decode(buf).map_err(Eip2718Error::RlpError)?, - )), + TxType::Legacy => { + Ok(Self::TaggedLegacy(Decodable::decode(buf).map_err(Eip2718Error::RlpError)?)) + } + TxType::Eip2930 => { + Ok(Self::Eip2930(Decodable::decode(buf).map_err(Eip2718Error::RlpError)?)) + } + TxType::Eip1559 => { + Ok(Self::Eip1559(Decodable::decode(buf).map_err(Eip2718Error::RlpError)?)) + } + TxType::Eip4844 => { + Ok(Self::Eip4844(Decodable::decode(buf).map_err(Eip2718Error::RlpError)?)) + } + TxType::Deposit => { + Ok(Self::Deposit(Decodable::decode(buf).map_err(Eip2718Error::RlpError)?)) + } } } fn fallback_decode(buf: &mut &[u8]) -> Result { - Ok(TxEnvelope::Legacy( - Decodable::decode(buf).map_err(Eip2718Error::RlpError)?, - )) + Ok(TxEnvelope::Legacy(Decodable::decode(buf).map_err(Eip2718Error::RlpError)?)) } } diff --git a/crates/derive/src/types/alloy/transaction/legacy.rs b/crates/derive/src/types/alloy/transaction/legacy.rs index 671578900..a6ba46832 100644 --- a/crates/derive/src/types/alloy/transaction/legacy.rs +++ b/crates/derive/src/types/alloy/transaction/legacy.rs @@ -85,10 +85,7 @@ impl TxLegacy { /// hash. 
pub fn encode_with_signature(&self, signature: &Signature, out: &mut dyn alloy_rlp::BufMut) { let payload_length = self.fields_len() + signature.rlp_vrs_len(); - let header = Header { - list: true, - payload_length, - }; + let header = Header { list: true, payload_length }; header.encode(out); self.encode_fields(out); signature.write_rlp_vrs(out); @@ -189,11 +186,8 @@ impl Transaction for TxLegacy { // type Receipt = ReceiptWithBloom; fn encode_for_signing(&self, out: &mut dyn BufMut) { - Header { - list: true, - payload_length: self.fields_len() + self.eip155_fields_len(), - } - .encode(out); + Header { list: true, payload_length: self.fields_len() + self.eip155_fields_len() } + .encode(out); self.encode_fields(out); self.encode_eip155_signing_fields(out); } diff --git a/crates/derive/src/types/batch/mod.rs b/crates/derive/src/types/batch/mod.rs index 8af807815..9102d24ba 100644 --- a/crates/derive/src/types/batch/mod.rs +++ b/crates/derive/src/types/batch/mod.rs @@ -1,4 +1,5 @@ -//! This module contains the batch types for the OP Stack derivation pipeline: [SpanBatch] & [SingleBatch]. +//! This module contains the batch types for the OP Stack derivation pipeline: [SpanBatch] & +//! [SingleBatch]. use super::DecodeError; use alloc::vec::Vec; diff --git a/crates/derive/src/types/batch/single_batch.rs b/crates/derive/src/types/batch/single_batch.rs index 1c9015b0b..6b77e913c 100644 --- a/crates/derive/src/types/batch/single_batch.rs +++ b/crates/derive/src/types/batch/single_batch.rs @@ -8,7 +8,8 @@ use alloy_rlp::{Decodable, Encodable}; /// Represents a single batch: a single encoded L2 block #[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct SingleBatch { - /// Block hash of the previous L2 block. `B256::ZERO` if it has not been set by the Batch Queue. + /// Block hash of the previous L2 block. `B256::ZERO` if it has not been set by the Batch + /// Queue. pub parent_hash: BlockHash, /// The batch epoch number. 
Same as the first L1 block number in the epoch. pub epoch_num: u64, @@ -23,9 +24,7 @@ pub struct SingleBatch { impl SingleBatch { /// If any transactions are empty or deposited transaction types. pub fn has_invalid_transactions(&self) -> bool { - self.transactions - .iter() - .any(|tx| tx.0.is_empty() || tx.0[0] == 0x7E) + self.transactions.iter().any(|tx| tx.0.is_empty() || tx.0[0] == 0x7E) } } @@ -46,13 +45,7 @@ impl Decodable for SingleBatch { let epoch_hash = BlockHash::decode(rlp)?; let timestamp = u64::decode(rlp)?; let transactions = Vec::::decode(rlp)?; - Ok(Self { - parent_hash, - epoch_num, - epoch_hash, - timestamp, - transactions, - }) + Ok(Self { parent_hash, epoch_num, epoch_hash, timestamp, transactions }) } } diff --git a/crates/derive/src/types/batch/span_batch/batch.rs b/crates/derive/src/types/batch/span_batch/batch.rs index 5bd5a312b..23c9247a6 100644 --- a/crates/derive/src/types/batch/span_batch/batch.rs +++ b/crates/derive/src/types/batch/span_batch/batch.rs @@ -67,8 +67,9 @@ impl SpanBatch { }) } - /// Converts all [SpanBatchElement]s after the L2 safe head to [SingleBatch]es. The resulting [SingleBatch]es do - /// not contain a parent hash, as it is populated by the Batch Queue stage. + /// Converts all [SpanBatchElement]s after the L2 safe head to [SingleBatch]es. The resulting + /// [SingleBatch]es do not contain a parent hash, as it is populated by the Batch Queue + /// stage. pub fn get_singular_batches( &self, l1_origins: Vec, @@ -112,11 +113,7 @@ impl SpanBatch { panic!("Batch is not ordered"); } - let SingleBatch { - epoch_hash, - parent_hash, - .. - } = singular_batch; + let SingleBatch { epoch_hash, parent_hash, .. } = singular_batch; // Always append the new batch and set the L1 origin check. 
self.batches.push(singular_batch.into()); @@ -124,7 +121,8 @@ impl SpanBatch { self.l1_origin_check = epoch_hash[..20].try_into().expect("Sub-slice cannot fail"); let epoch_bit = if self.batches.len() == 1 { - // If there is only one batch, initialize the parent check and set the epoch bit based on the sequence number. + // If there is only one batch, initialize the parent check and set the epoch bit based + // on the sequence number. self.parent_check = parent_hash[..20].try_into().expect("Sub-slice cannot fail"); seq_num == 0 } else { diff --git a/crates/derive/src/types/batch/span_batch/bits.rs b/crates/derive/src/types/batch/span_batch/bits.rs index c0be418a5..4646155eb 100644 --- a/crates/derive/src/types/batch/span_batch/bits.rs +++ b/crates/derive/src/types/batch/span_batch/bits.rs @@ -29,8 +29,8 @@ impl From for Vec { impl SpanBatchBits { /// Decodes a standard span-batch bitlist from a reader. - /// The bitlist is encoded as big-endian integer, left-padded with zeroes to a multiple of 8 bits. - /// The encoded bitlist cannot be longer than [MAX_SPAN_BATCH_SIZE]. + /// The bitlist is encoded as big-endian integer, left-padded with zeroes to a multiple of 8 + /// bits. The encoded bitlist cannot be longer than [MAX_SPAN_BATCH_SIZE]. pub fn decode(b: &mut &[u8], bit_length: usize) -> Result { let buffer_len = bit_length / 8 + if bit_length % 8 != 0 { 1 } else { 0 }; if buffer_len > MAX_SPAN_BATCH_SIZE { @@ -59,8 +59,8 @@ impl SpanBatchBits { } /// Encodes a standard span-batch bitlist. - /// The bitlist is encoded as big-endian integer, left-padded with zeroes to a multiple of 8 bits. - /// The encoded bitlist cannot be longer than [MAX_SPAN_BATCH_SIZE]. + /// The bitlist is encoded as big-endian integer, left-padded with zeroes to a multiple of 8 + /// bits. The encoded bitlist cannot be longer than [MAX_SPAN_BATCH_SIZE]. 
pub fn encode( w: &mut Vec, bit_length: usize, @@ -97,11 +97,7 @@ impl SpanBatchBits { // Shift the bits of the byte to the right, based on the bit index, and // mask it with 1 to isolate the bit we're interested in. // If the result is not zero, the bit is set to 1, otherwise it's 0. - Some(if byte & (1 << (8 - bit_index)) != 0 { - 1 - } else { - 0 - }) + Some(if byte & (1 << (8 - bit_index)) != 0 { 1 } else { 0 }) } else { // Return None if the index is out of bounds None @@ -134,10 +130,12 @@ impl SpanBatchBits { /// Calculates the bit length of the [SpanBatchBits] bitfield. pub fn bit_len(&self) -> usize { if let Some((top_word, rest)) = self.0.split_last() { - // Calculate bit length. Rust's leading_zeros counts zeros from the MSB, so subtract from total bits. + // Calculate bit length. Rust's leading_zeros counts zeros from the MSB, so subtract + // from total bits. let significant_bits = 8 - top_word.leading_zeros() as usize; - // Return total bits, taking into account the full words in `rest` and the significant bits in `top`. + // Return total bits, taking into account the full words in `rest` and the significant + // bits in `top`. rest.len() * 8 + significant_bits } else { // If the slice is empty, return 0. 
diff --git a/crates/derive/src/types/batch/span_batch/builder.rs b/crates/derive/src/types/batch/span_batch/builder.rs index 5f8787a82..ccfe2c992 100644 --- a/crates/derive/src/types/batch/span_batch/builder.rs +++ b/crates/derive/src/types/batch/span_batch/builder.rs @@ -2,8 +2,7 @@ #![allow(unused)] -use crate::types::SingleBatch; -use crate::types::{RawSpanBatch, SpanBatch, SpanBatchElement}; +use crate::types::{RawSpanBatch, SingleBatch, SpanBatch, SpanBatchElement}; use alloc::vec::Vec; use alloy_primitives::FixedBytes; diff --git a/crates/derive/src/types/batch/span_batch/errors.rs b/crates/derive/src/types/batch/span_batch/errors.rs index 230e1149b..7245a41ab 100644 --- a/crates/derive/src/types/batch/span_batch/errors.rs +++ b/crates/derive/src/types/batch/span_batch/errors.rs @@ -26,10 +26,9 @@ impl Display for SpanBatchError { match self { SpanBatchError::TooBigSpanBatchSize => write!(f, "The span batch is too big"), SpanBatchError::BitfieldTooLong => write!(f, "The bit field is too long"), - SpanBatchError::InvalidBitSlice => write!( - f, - "Failed to set [alloy_primitives::U256] from big-endian slice" - ), + SpanBatchError::InvalidBitSlice => { + write!(f, "Failed to set [alloy_primitives::U256] from big-endian slice") + } SpanBatchError::EmptySpanBatch => write!(f, "Empty Span Batch"), SpanBatchError::MissingL1Origin => write!(f, "Missing L1 origin"), SpanBatchError::Encoding(e) => write!(f, "Encoding error: {:?}", e), @@ -73,7 +72,8 @@ pub enum SpanDecodingError { BlockTxCounts, /// Failed to decode transaction nonces TxNonces, - /// Mismatch in length between the transaction type and signature arrays in a span batch transaction payload. + /// Mismatch in length between the transaction type and signature arrays in a span batch + /// transaction payload. 
TypeSignatureLenMismatch, /// Invalid transaction type InvalidTransactionType, diff --git a/crates/derive/src/types/batch/span_batch/payload.rs b/crates/derive/src/types/batch/span_batch/payload.rs index 918c4f115..e0eb57e7b 100644 --- a/crates/derive/src/types/batch/span_batch/payload.rs +++ b/crates/derive/src/types/batch/span_batch/payload.rs @@ -10,7 +10,8 @@ use alloc::vec::Vec; pub struct SpanBatchPayload { /// Number of L2 block in the span pub block_count: u64, - /// Standard span-batch bitlist of blockCount bits. Each bit indicates if the L1 origin is changed at the L2 block. + /// Standard span-batch bitlist of blockCount bits. Each bit indicates if the L1 origin is + /// changed at the L2 block. pub origin_bits: SpanBatchBits, /// List of transaction counts for each L2 block pub block_tx_counts: Vec, @@ -47,7 +48,8 @@ impl SpanBatchPayload { pub fn decode_block_count(&mut self, r: &mut &[u8]) -> Result<(), SpanBatchError> { let (block_count, remaining) = unsigned_varint::decode::u64(r) .map_err(|_| SpanBatchError::Decoding(SpanDecodingError::BlockCount))?; - // The number of transactions in a single L2 block cannot be greater than [MAX_SPAN_BATCH_SIZE]. + // The number of transactions in a single L2 block cannot be greater than + // [MAX_SPAN_BATCH_SIZE]. if block_count as usize > MAX_SPAN_BATCH_SIZE { return Err(SpanBatchError::TooBigSpanBatchSize); } @@ -61,15 +63,17 @@ impl SpanBatchPayload { /// Decode block transaction counts from a reader. pub fn decode_block_tx_counts(&mut self, r: &mut &[u8]) -> Result<(), SpanBatchError> { - // Initially allocate the vec with the block count, to reduce re-allocations in the first few blocks. + // Initially allocate the vec with the block count, to reduce re-allocations in the first + // few blocks. 
let mut block_tx_counts = Vec::with_capacity(self.block_count as usize); for _ in 0..self.block_count { let (block_tx_count, remaining) = unsigned_varint::decode::u64(r) .map_err(|_| SpanBatchError::Decoding(SpanDecodingError::BlockTxCounts))?; - // The number of transactions in a single L2 block cannot be greater than [MAX_SPAN_BATCH_SIZE]. - // Every transaction will take at least a single byte. + // The number of transactions in a single L2 block cannot be greater than + // [MAX_SPAN_BATCH_SIZE]. Every transaction will take at least a single + // byte. if block_tx_count as usize > MAX_SPAN_BATCH_SIZE { return Err(SpanBatchError::TooBigSpanBatchSize); } @@ -87,14 +91,12 @@ impl SpanBatchPayload { } let total_block_tx_count = - self.block_tx_counts - .iter() - .try_fold(0u64, |acc, block_tx_count| { - acc.checked_add(*block_tx_count) - .ok_or(SpanBatchError::TooBigSpanBatchSize) - })?; - - // The total number of transactions in a span batch cannot be greater than [MAX_SPAN_BATCH_SIZE]. + self.block_tx_counts.iter().try_fold(0u64, |acc, block_tx_count| { + acc.checked_add(*block_tx_count).ok_or(SpanBatchError::TooBigSpanBatchSize) + })?; + + // The total number of transactions in a span batch cannot be greater than + // [MAX_SPAN_BATCH_SIZE]. if total_block_tx_count as usize > MAX_SPAN_BATCH_SIZE { return Err(SpanBatchError::TooBigSpanBatchSize); } @@ -111,10 +113,7 @@ impl SpanBatchPayload { /// Encode the block count into a writer. pub fn encode_block_count(&self, w: &mut Vec) { let mut u64_varint_buf = [0u8; 10]; - w.extend_from_slice(unsigned_varint::encode::u64( - self.block_count, - &mut u64_varint_buf, - )); + w.extend_from_slice(unsigned_varint::encode::u64(self.block_count, &mut u64_varint_buf)); } /// Encode the block transaction counts into a writer. 
@@ -122,10 +121,7 @@ impl SpanBatchPayload { let mut u64_varint_buf = [0u8; 10]; for block_tx_count in &self.block_tx_counts { u64_varint_buf.fill(0); - w.extend_from_slice(unsigned_varint::encode::u64( - *block_tx_count, - &mut u64_varint_buf, - )); + w.extend_from_slice(unsigned_varint::encode::u64(*block_tx_count, &mut u64_varint_buf)); } } diff --git a/crates/derive/src/types/batch/span_batch/prefix.rs b/crates/derive/src/types/batch/span_batch/prefix.rs index f136482a2..350082edc 100644 --- a/crates/derive/src/types/batch/span_batch/prefix.rs +++ b/crates/derive/src/types/batch/span_batch/prefix.rs @@ -67,14 +67,8 @@ impl SpanBatchPrefix { /// Encodes the [SpanBatchPrefix] into a writer. pub fn encode_prefix(&self, w: &mut Vec) { let mut u64_buf = [0u8; 10]; - w.extend_from_slice(unsigned_varint::encode::u64( - self.rel_timestamp, - &mut u64_buf, - )); - w.extend_from_slice(unsigned_varint::encode::u64( - self.l1_origin_num, - &mut u64_buf, - )); + w.extend_from_slice(unsigned_varint::encode::u64(self.rel_timestamp, &mut u64_buf)); + w.extend_from_slice(unsigned_varint::encode::u64(self.l1_origin_num, &mut u64_buf)); w.extend_from_slice(self.parent_check.as_slice()); w.extend_from_slice(self.l1_origin_check.as_slice()); } @@ -98,9 +92,6 @@ mod test { let mut buf = Vec::new(); expected.encode_prefix(&mut buf); - assert_eq!( - SpanBatchPrefix::decode_prefix(&mut buf.as_slice()).unwrap(), - expected - ); + assert_eq!(SpanBatchPrefix::decode_prefix(&mut buf.as_slice()).unwrap(), expected); } } diff --git a/crates/derive/src/types/batch/span_batch/raw.rs b/crates/derive/src/types/batch/span_batch/raw.rs index aef202957..0dcc3eacc 100644 --- a/crates/derive/src/types/batch/span_batch/raw.rs +++ b/crates/derive/src/types/batch/span_batch/raw.rs @@ -37,8 +37,9 @@ impl RawSpanBatch { Ok(Self { prefix, payload }) } - /// Converts a [RawSpanBatch] into a [SpanBatch], which has a list of [SpanBatchElement]s. 
Thos function does not - /// populate the [SpanBatch] with chain configuration data, which is required for making payload attributes. + /// Converts a [RawSpanBatch] into a [SpanBatch], which has a list of [SpanBatchElement]s. This + /// function does not populate the [SpanBatch] with chain configuration data, which is + /// required for making payload attributes. pub fn derive( &mut self, block_time: u64, @@ -57,9 +58,9 @@ impl RawSpanBatch { .payload .origin_bits .get_bit(i as usize) - .ok_or(SpanBatchError::Decoding(SpanDecodingError::L1OriginCheck))? - == 1 - && i > 0 + .ok_or(SpanBatchError::Decoding(SpanDecodingError::L1OriginCheck))? == + 1 && + i > 0 { l1_origin_number -= 1; } @@ -82,10 +83,7 @@ impl RawSpanBatch { acc.push(SpanBatchElement { epoch_num: block_origin_nums[i as usize], timestamp: genesis_time + self.prefix.rel_timestamp + block_time * i, - transactions: transactions - .into_iter() - .map(|v| RawTransaction(v.into())) - .collect(), + transactions: transactions.into_iter().map(|v| RawTransaction(v.into())).collect(), }); acc }); diff --git a/crates/derive/src/types/batch/span_batch/signature.rs b/crates/derive/src/types/batch/span_batch/signature.rs index 61a20d289..3b5171f3c 100644 --- a/crates/derive/src/types/batch/span_batch/signature.rs +++ b/crates/derive/src/types/batch/span_batch/signature.rs @@ -15,11 +15,7 @@ pub struct SpanBatchSignature { impl From for SpanBatchSignature { fn from(value: Signature) -> Self { - Self { - v: value.v().to_u64(), - r: value.r(), - s: value.s(), - } + Self { v: value.v().to_u64(), r: value.r(), s: value.s() } } } @@ -27,11 +23,7 @@ impl TryFrom for Signature { type Error = SpanBatchError; fn try_from(value: SpanBatchSignature) -> Result { - Self::from_rs_and_parity( - value.r, - value.s, - convert_v_to_y_parity(value.v, TxType::Legacy)?, - ) - .map_err(|_| SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionSignature)) + Self::from_rs_and_parity(value.r, value.s, convert_v_to_y_parity(value.v, 
TxType::Legacy)?) + .map_err(|_| SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionSignature)) } } diff --git a/crates/derive/src/types/batch/span_batch/transactions.rs b/crates/derive/src/types/batch/span_batch/transactions.rs index 5016ab80c..58f584b9a 100644 --- a/crates/derive/src/types/batch/span_batch/transactions.rs +++ b/crates/derive/src/types/batch/span_batch/transactions.rs @@ -1,12 +1,13 @@ -//! This module contains the [SpanBatchTransactions] type and logic for encoding and decoding transactions in a span batch. +//! This module contains the [SpanBatchTransactions] type and logic for encoding and decoding +//! transactions in a span batch. use alloc::vec::Vec; use alloy_primitives::{Address, U256}; use alloy_rlp::{Buf, Decodable, Encodable}; -use super::{convert_v_to_y_parity, read_tx_data}; use super::{ - SpanBatchBits, SpanBatchError, SpanBatchSignature, SpanBatchTransactionData, SpanDecodingError, + convert_v_to_y_parity, read_tx_data, SpanBatchBits, SpanBatchError, SpanBatchSignature, + SpanBatchTransactionData, SpanDecodingError, }; use crate::types::{RawTransaction, Transaction, TxEnvelope, TxKind, TxType}; @@ -66,11 +67,7 @@ impl SpanBatchTransactions { /// Encode the contract creation bits into a writer. 
pub fn encode_contract_creation_bits(&self, w: &mut Vec) -> Result<(), SpanBatchError> { - SpanBatchBits::encode( - w, - self.total_block_tx_count as usize, - &self.contract_creation_bits, - )?; + SpanBatchBits::encode(w, self.total_block_tx_count as usize, &self.contract_creation_bits)?; Ok(()) } @@ -155,11 +152,7 @@ impl SpanBatchTransactions { for _ in 0..self.total_block_tx_count { let r_val = U256::from_be_slice(&r[..32]); let s_val = U256::from_be_slice(&r[32..64]); - sigs.push(SpanBatchSignature { - v: 0, - r: r_val, - s: s_val, - }); + sigs.push(SpanBatchSignature { v: 0, r: r_val, s: s_val }); r.advance(64); } self.tx_sigs = sigs; @@ -210,7 +203,8 @@ impl SpanBatchTransactions { let mut tx_datas = Vec::new(); let mut tx_types = Vec::new(); - // Do not need the transaction data header because the RLP stream already includes the length information. + // Do not need the transaction data header because the RLP stream already includes the + // length information. for _ in 0..self.total_block_tx_count { let (tx_data, tx_type) = read_tx_data(r)?; tx_datas.push(tx_data); @@ -228,26 +222,17 @@ impl SpanBatchTransactions { /// Returns the number of contract creation transactions in the span batch. pub fn contract_creation_count(&self) -> u64 { - self.contract_creation_bits - .0 - .iter() - .map(|b| b.count_ones() as u64) - .sum() + self.contract_creation_bits.0.iter().map(|b| b.count_ones() as u64).sum() } /// Recover the `v` values of the transaction signatures. 
pub fn recover_v(&mut self, chain_id: u64) -> Result<(), SpanBatchError> { if self.tx_sigs.len() != self.tx_types.len() { - return Err(SpanBatchError::Decoding( - SpanDecodingError::TypeSignatureLenMismatch, - )); + return Err(SpanBatchError::Decoding(SpanDecodingError::TypeSignatureLenMismatch)); } let mut protected_bits_idx = 0; for (i, tx_type) in self.tx_types.iter().enumerate() { - let bit = self - .y_parity_bits - .get_bit(i) - .ok_or(SpanBatchError::BitfieldTooLong)?; + let bit = self.y_parity_bits.get_bit(i).ok_or(SpanBatchError::BitfieldTooLong)?; let v = match tx_type { TxType::Legacy => { // Legacy transaction @@ -261,9 +246,7 @@ impl SpanBatchTransactions { } } TxType::Eip2930 | TxType::Eip1559 => Ok(bit as u64), - _ => Err(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionType, - )), + _ => Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionType)), }?; self.tx_sigs.get_mut(i).expect("Transaction must exist").v = v; } @@ -281,23 +264,18 @@ impl SpanBatchTransactions { let nonce = self .tx_nonces .get(idx as usize) - .ok_or(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionData, - ))?; + .ok_or(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData))?; let gas = self .tx_gases .get(idx as usize) - .ok_or(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionData, - ))?; - let bit = self.contract_creation_bits.get_bit(idx as usize).ok_or( - SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData), - )?; + .ok_or(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData))?; + let bit = self + .contract_creation_bits + .get_bit(idx as usize) + .ok_or(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData))?; let to = if bit == 0 { if self.tx_tos.len() <= to_idx { - return Err(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionData, - )); + return Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData)); } to_idx += 1; 
Some(self.tx_tos[to_idx - 1]) @@ -307,9 +285,7 @@ impl SpanBatchTransactions { let sig = *self .tx_sigs .get(idx as usize) - .ok_or(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionData, - ))?; + .ok_or(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData))?; let tx_envelope = tx.to_enveloped_tx(*nonce, *gas, to, chain_id, sig.try_into()?)?; let mut buf = Vec::new(); tx_envelope.encode(&mut buf); @@ -335,8 +311,7 @@ impl SpanBatchTransactions { let tx_type = tx_enveloped.tx_type(); if matches!(tx_type, TxType::Legacy) { // TODO: Check protected signature - self.protected_bits - .set_bit(self.legacy_tx_count as usize, false); + self.protected_bits.set_bit(self.legacy_tx_count as usize, false); self.legacy_tx_count += 1; } @@ -347,9 +322,7 @@ impl SpanBatchTransactions { TxEnvelope::Eip2930(s) => (*s.signature(), s.to(), s.nonce(), s.gas_limit()), TxEnvelope::Eip1559(s) => (*s.signature(), s.to(), s.nonce(), s.gas_limit()), _ => { - return Err(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionData, - )) + return Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData)) } }; let signature_v = signature.v().to_u64(); @@ -365,10 +338,8 @@ impl SpanBatchTransactions { span_batch_tx.encode(&mut tx_data_buf); self.tx_sigs.push(signature.into()); - self.contract_creation_bits - .set_bit((i + offset) as usize, contract_creation_bit == 1); - self.y_parity_bits - .set_bit((i + offset) as usize, y_parity_bit); + self.contract_creation_bits.set_bit((i + offset) as usize, contract_creation_bit == 1); + self.y_parity_bits.set_bit((i + offset) as usize, y_parity_bit); self.tx_nonces.push(nonce); self.tx_datas.push(tx_data_buf); self.tx_gases.push(gas); diff --git a/crates/derive/src/types/batch/span_batch/tx_data/eip1559.rs b/crates/derive/src/types/batch/span_batch/tx_data/eip1559.rs index 4a0518486..aba7ab9ae 100644 --- a/crates/derive/src/types/batch/span_batch/tx_data/eip1559.rs +++ 
b/crates/derive/src/types/batch/span_batch/tx_data/eip1559.rs @@ -1,8 +1,8 @@ //! This module contains the eip1559 transaction data type for a span batch. -use crate::types::eip2930::AccessList; use crate::types::{ - Signed, SpanBatchError, SpanDecodingError, Transaction, TxEip1559, TxEnvelope, TxKind, + eip2930::AccessList, Signed, SpanBatchError, SpanDecodingError, Transaction, TxEip1559, + TxEnvelope, TxKind, }; use alloy_primitives::{Address, Signature, U256}; use alloy_rlp::{Bytes, RlpDecodable, RlpEncodable}; @@ -36,25 +36,17 @@ impl SpanBatchEip1559TransactionData { chain_id, nonce, max_fee_per_gas: u128::from_be_bytes( - self.max_fee_per_gas.to_be_bytes::<32>()[16..] - .try_into() - .map_err(|_| { - SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData) - })?, + self.max_fee_per_gas.to_be_bytes::<32>()[16..].try_into().map_err(|_| { + SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData) + })?, ), max_priority_fee_per_gas: u128::from_be_bytes( - self.max_priority_fee_per_gas.to_be_bytes::<32>()[16..] 
- .try_into() - .map_err(|_| { - SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData) - })?, + self.max_priority_fee_per_gas.to_be_bytes::<32>()[16..].try_into().map_err( + |_| SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData), + )?, ), gas_limit: gas, - to: if let Some(to) = to { - TxKind::Call(to) - } else { - TxKind::Create - }, + to: if let Some(to) = to { TxKind::Call(to) } else { TxKind::Create }, value: self.value, input: self.data.clone().into(), access_list: self.access_list.clone(), @@ -86,10 +78,7 @@ mod test { let decoded = SpanBatchTransactionData::decode(&mut encoded_buf.as_slice()).unwrap(); let SpanBatchTransactionData::Eip1559(variable_fee_decoded) = decoded else { - panic!( - "Expected SpanBatchEip1559TransactionData, got {:?}", - decoded - ); + panic!("Expected SpanBatchEip1559TransactionData, got {:?}", decoded); }; assert_eq!(variable_fee_tx, variable_fee_decoded); diff --git a/crates/derive/src/types/batch/span_batch/tx_data/eip2930.rs b/crates/derive/src/types/batch/span_batch/tx_data/eip2930.rs index d12e9240d..6584fdbe4 100644 --- a/crates/derive/src/types/batch/span_batch/tx_data/eip2930.rs +++ b/crates/derive/src/types/batch/span_batch/tx_data/eip2930.rs @@ -1,8 +1,8 @@ //! This module contains the eip2930 transaction data type for a span batch. -use crate::types::eip2930::AccessList; use crate::types::{ - Signed, SpanBatchError, SpanDecodingError, Transaction, TxEip2930, TxEnvelope, TxKind, + eip2930::AccessList, Signed, SpanBatchError, SpanDecodingError, Transaction, TxEip2930, + TxEnvelope, TxKind, }; use alloy_primitives::{Address, Signature, U256}; use alloy_rlp::{Bytes, RlpDecodable, RlpEncodable}; @@ -34,18 +34,12 @@ impl SpanBatchEip2930TransactionData { chain_id, nonce, gas_price: u128::from_be_bytes( - self.gas_price.to_be_bytes::<32>()[16..] 
- .try_into() - .map_err(|_| { - SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData) - })?, + self.gas_price.to_be_bytes::<32>()[16..].try_into().map_err(|_| { + SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData) + })?, ), gas_limit: gas, - to: if let Some(to) = to { - TxKind::Call(to) - } else { - TxKind::Create - }, + to: if let Some(to) = to { TxKind::Call(to) } else { TxKind::Create }, value: self.value, input: self.data.clone().into(), access_list: self.access_list.clone(), @@ -77,10 +71,7 @@ mod test { let decoded = SpanBatchTransactionData::decode(&mut encoded_buf.as_slice()).unwrap(); let SpanBatchTransactionData::Eip2930(access_list_decoded) = decoded else { - panic!( - "Expected SpanBatchEip2930TransactionData, got {:?}", - decoded - ); + panic!("Expected SpanBatchEip2930TransactionData, got {:?}", decoded); }; assert_eq!(access_list_tx, access_list_decoded); diff --git a/crates/derive/src/types/batch/span_batch/tx_data/legacy.rs b/crates/derive/src/types/batch/span_batch/tx_data/legacy.rs index 3e3a79be2..fb95412af 100644 --- a/crates/derive/src/types/batch/span_batch/tx_data/legacy.rs +++ b/crates/derive/src/types/batch/span_batch/tx_data/legacy.rs @@ -31,18 +31,12 @@ impl SpanBatchLegacyTransactionData { chain_id: Some(chain_id), nonce, gas_price: u128::from_be_bytes( - self.gas_price.to_be_bytes::<32>()[16..] 
- .try_into() - .map_err(|_| { - SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData) - })?, + self.gas_price.to_be_bytes::<32>()[16..].try_into().map_err(|_| { + SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData) + })?, ), gas_limit: gas, - to: if let Some(to) = to { - TxKind::Call(to) - } else { - TxKind::Create - }, + to: if let Some(to) = to { TxKind::Call(to) } else { TxKind::Create }, value: self.value, input: self.data.clone().into(), }; diff --git a/crates/derive/src/types/batch/span_batch/tx_data/wrapper.rs b/crates/derive/src/types/batch/span_batch/tx_data/wrapper.rs index fa7ef9228..f71bf7529 100644 --- a/crates/derive/src/types/batch/span_batch/tx_data/wrapper.rs +++ b/crates/derive/src/types/batch/span_batch/tx_data/wrapper.rs @@ -41,9 +41,7 @@ impl Decodable for SpanBatchTransactionData { fn decode(r: &mut &[u8]) -> Result { if !r.is_empty() && r[0] > 0x7F { // Legacy transaction - return Ok(SpanBatchTransactionData::Legacy( - SpanBatchLegacyTransactionData::decode(r)?, - )); + return Ok(SpanBatchTransactionData::Legacy(SpanBatchLegacyTransactionData::decode(r)?)); } // Non-legacy transaction (EIP-2718 envelope encoding) Self::decode_typed(r) @@ -55,33 +53,31 @@ impl TryFrom<&TxEnvelope> for SpanBatchTransactionData { fn try_from(tx_envelope: &TxEnvelope) -> Result { match tx_envelope { - TxEnvelope::Legacy(s) => Ok(SpanBatchTransactionData::Legacy( - SpanBatchLegacyTransactionData { + TxEnvelope::Legacy(s) => { + Ok(SpanBatchTransactionData::Legacy(SpanBatchLegacyTransactionData { value: s.value, gas_price: U256::from(s.gas_price), data: Bytes::from(s.input().to_vec()), - }, - )), - TxEnvelope::Eip2930(s) => Ok(SpanBatchTransactionData::Eip2930( - SpanBatchEip2930TransactionData { + })) + } + TxEnvelope::Eip2930(s) => { + Ok(SpanBatchTransactionData::Eip2930(SpanBatchEip2930TransactionData { value: s.value, gas_price: U256::from(s.gas_price), data: Bytes::from(s.input().to_vec()), access_list: s.access_list.clone(), - 
}, - )), - TxEnvelope::Eip1559(s) => Ok(SpanBatchTransactionData::Eip1559( - SpanBatchEip1559TransactionData { + })) + } + TxEnvelope::Eip1559(s) => { + Ok(SpanBatchTransactionData::Eip1559(SpanBatchEip1559TransactionData { value: s.value, max_fee_per_gas: U256::from(s.max_fee_per_gas), max_priority_fee_per_gas: U256::from(s.max_priority_fee_per_gas), data: Bytes::from(s.input().to_vec()), access_list: s.access_list.clone(), - }, - )), - _ => Err(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionType, - )), + })) + } + _ => Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionType)), } } } @@ -102,10 +98,7 @@ impl SpanBatchTransactionData { return Err(alloy_rlp::Error::Custom("Invalid transaction data")); } - match b[0] - .try_into() - .map_err(|_| alloy_rlp::Error::Custom("Invalid tx type"))? - { + match b[0].try_into().map_err(|_| alloy_rlp::Error::Custom("Invalid tx type"))? { TxType::Eip2930 => Ok(SpanBatchTransactionData::Eip2930( SpanBatchEip2930TransactionData::decode(&mut &b[1..])?, )), diff --git a/crates/derive/src/types/batch/span_batch/utils.rs b/crates/derive/src/types/batch/span_batch/utils.rs index dc227d608..556e34c9e 100644 --- a/crates/derive/src/types/batch/span_batch/utils.rs +++ b/crates/derive/src/types/batch/span_batch/utils.rs @@ -8,9 +8,8 @@ use alloy_rlp::{Buf, Header}; /// Reads transaction data from a reader. 
pub(crate) fn read_tx_data(r: &mut &[u8]) -> Result<(Vec, TxType), SpanBatchError> { let mut tx_data = Vec::new(); - let first_byte = *r.first().ok_or(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionData, - ))?; + let first_byte = + *r.first().ok_or(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData))?; let mut tx_type = 0; if first_byte <= 0x7F { // EIP-2718: Non-legacy tx, so write tx type @@ -20,8 +19,9 @@ pub(crate) fn read_tx_data(r: &mut &[u8]) -> Result<(Vec, TxType), SpanBatch } // Copy the reader, as we need to read the header to determine if the payload is a list. - // TODO(clabby): This is horribly inefficient. It'd be nice if we could peek at this rather than forcibly having to - // advance the buffer passed, should read more into the alloy rlp docs to see if this is possible. + // TODO(clabby): This is horribly inefficient. It'd be nice if we could peek at this rather than + // forcibly having to advance the buffer passed, should read more into the alloy rlp docs to + // see if this is possible. 
let r_copy = Vec::from(*r); let rlp_header = Header::decode(&mut r_copy.as_slice()) .map_err(|_| SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData))?; @@ -33,9 +33,7 @@ pub(crate) fn read_tx_data(r: &mut &[u8]) -> Result<(Vec, TxType), SpanBatch r.advance(payload_length_with_header); Ok(payload) } else { - Err(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionData, - )) + Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData)) }?; tx_data.extend_from_slice(&tx_payload); @@ -60,8 +58,6 @@ pub(crate) fn convert_v_to_y_parity(v: u64, tx_type: TxType) -> Result Ok(v == 1), - _ => Err(SpanBatchError::Decoding( - SpanDecodingError::InvalidTransactionType, - )), + _ => Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionType)), } } diff --git a/crates/derive/src/types/block.rs b/crates/derive/src/types/block.rs index cfdaaf979..aa56b83e1 100644 --- a/crates/derive/src/types/block.rs +++ b/crates/derive/src/types/block.rs @@ -22,12 +22,7 @@ pub struct BlockInfo { impl BlockInfo { /// Instantiates a new [BlockInfo]. pub fn new(hash: B256, number: u64, parent_hash: B256, timestamp: u64) -> Self { - Self { - hash, - number, - parent_hash, - timestamp, - } + Self { hash, number, parent_hash, timestamp } } } @@ -46,11 +41,7 @@ pub struct L2BlockInfo { impl L2BlockInfo { /// Instantiates a new [L2BlockInfo]. pub fn new(block_info: BlockInfo, l1_origin: BlockId, seq_num: u64) -> Self { - Self { - block_info, - l1_origin, - seq_num, - } + Self { block_info, l1_origin, seq_num } } } diff --git a/crates/derive/src/types/channel.rs b/crates/derive/src/types/channel.rs index b732e0b01..671e182f7 100644 --- a/crates/derive/src/types/channel.rs +++ b/crates/derive/src/types/channel.rs @@ -37,12 +37,7 @@ pub struct Channel { impl Channel { /// Create a new [Channel] with the given [ChannelID] and [BlockInfo]. 
pub fn new(id: ChannelID, open_block: BlockInfo) -> Self { - Self { - id, - open_block, - inputs: HashMap::new(), - ..Default::default() - } + Self { id, open_block, inputs: HashMap::new(), ..Default::default() } } /// Add a frame to the channel. @@ -60,16 +55,10 @@ impl Channel { bail!("Frame ID does not match channel ID"); } if frame.is_last && self.closed { - bail!( - "Cannot add ending frame to a closed channel. Channel ID: {:?}", - self.id - ); + bail!("Cannot add ending frame to a closed channel. Channel ID: {:?}", self.id); } if self.inputs.contains_key(&frame.number) { - bail!( - "Frame number already exists in channel. Channel ID: {:?}", - self.id - ); + bail!("Frame number already exists in channel. Channel ID: {:?}", self.id); } if self.closed && frame.number >= self.last_frame_number { bail!( @@ -84,7 +73,8 @@ impl Channel { self.last_frame_number = frame.number; self.closed = true; - // Prune frames with a higher number than the last frame number when we receive a closing frame. + // Prune frames with a higher number than the last frame number when we receive a + // closing frame. 
if self.last_frame_number < self.highest_frame_number { self.inputs.retain(|id, frame| { self.estimated_size -= frame.size(); @@ -144,10 +134,7 @@ impl Channel { pub fn frame_data(&self) -> Result { let mut data = Vec::with_capacity(self.size()); (0..=self.last_frame_number).try_for_each(|i| { - let frame = self - .inputs - .get(&i) - .ok_or_else(|| anyhow!("Frame not found"))?; + let frame = self.inputs.get(&i).ok_or_else(|| anyhow!("Frame not found"))?; data.extend_from_slice(&frame.data); Ok(()) })?; @@ -183,8 +170,8 @@ mod test { let block = BlockInfo::default(); let mut channel = Channel::new(id, block); - if test_case.frames.len() != test_case.should_error.len() - || test_case.frames.len() != test_case.sizes.len() + if test_case.frames.len() != test_case.should_error.len() || + test_case.frames.len() != test_case.sizes.len() { panic!("Test case length mismatch"); } @@ -206,28 +193,15 @@ mod test { let test_cases = [ FrameValidityTestCase { name: "wrong channel".to_string(), - frames: vec![Frame { - id: [0xEE; 16], - ..Default::default() - }], + frames: vec![Frame { id: [0xEE; 16], ..Default::default() }], should_error: vec![true], sizes: vec![0], }, FrameValidityTestCase { name: "double close".to_string(), frames: vec![ - Frame { - id, - is_last: true, - number: 2, - data: b"four".to_vec(), - }, - Frame { - id, - is_last: true, - number: 1, - ..Default::default() - }, + Frame { id, is_last: true, number: 2, data: b"four".to_vec() }, + Frame { id, is_last: true, number: 1, ..Default::default() }, ], should_error: vec![false, true], sizes: vec![204, 204], @@ -235,18 +209,8 @@ mod test { FrameValidityTestCase { name: "duplicate frame".to_string(), frames: vec![ - Frame { - id, - number: 2, - data: b"four".to_vec(), - ..Default::default() - }, - Frame { - id, - number: 2, - data: b"seven".to_vec(), - ..Default::default() - }, + Frame { id, number: 2, data: b"four".to_vec(), ..Default::default() }, + Frame { id, number: 2, data: b"seven".to_vec(), 
..Default::default() }, ], should_error: vec![false, true], sizes: vec![204, 204], @@ -254,18 +218,8 @@ mod test { FrameValidityTestCase { name: "duplicate closing frames".to_string(), frames: vec![ - Frame { - id, - number: 2, - is_last: true, - data: b"four".to_vec(), - }, - Frame { - id, - number: 2, - is_last: true, - data: b"seven".to_vec(), - }, + Frame { id, number: 2, is_last: true, data: b"four".to_vec() }, + Frame { id, number: 2, is_last: true, data: b"seven".to_vec() }, ], should_error: vec![false, true], sizes: vec![204, 204], @@ -273,18 +227,8 @@ mod test { FrameValidityTestCase { name: "frame past closing".to_string(), frames: vec![ - Frame { - id, - number: 2, - is_last: true, - data: b"four".to_vec(), - }, - Frame { - id, - number: 10, - data: b"seven".to_vec(), - ..Default::default() - }, + Frame { id, number: 2, is_last: true, data: b"four".to_vec() }, + Frame { id, number: 10, data: b"seven".to_vec(), ..Default::default() }, ], should_error: vec![false, true], sizes: vec![204, 204], @@ -292,18 +236,8 @@ mod test { FrameValidityTestCase { name: "prune after close frame".to_string(), frames: vec![ - Frame { - id, - number: 10, - is_last: false, - data: b"seven".to_vec(), - }, - Frame { - id, - number: 2, - is_last: true, - data: b"four".to_vec(), - }, + Frame { id, number: 10, is_last: false, data: b"seven".to_vec() }, + Frame { id, number: 2, is_last: true, data: b"four".to_vec() }, ], should_error: vec![false, false], sizes: vec![205, 204], @@ -311,18 +245,8 @@ mod test { FrameValidityTestCase { name: "multiple valid frames".to_string(), frames: vec![ - Frame { - id, - number: 10, - data: b"seven__".to_vec(), - ..Default::default() - }, - Frame { - id, - number: 2, - data: b"four".to_vec(), - ..Default::default() - }, + Frame { id, number: 10, data: b"seven__".to_vec(), ..Default::default() }, + Frame { id, number: 2, data: b"four".to_vec(), ..Default::default() }, ], should_error: vec![false, false], sizes: vec![207, 411], diff --git 
a/crates/derive/src/types/errors.rs b/crates/derive/src/types/errors.rs index 114a94954..1b97a6249 100644 --- a/crates/derive/src/types/errors.rs +++ b/crates/derive/src/types/errors.rs @@ -20,9 +20,9 @@ impl PartialEq for StageError { fn eq(&self, other: &StageError) -> bool { matches!( (self, other), - (StageError::Eof, StageError::Eof) - | (StageError::NotEnoughData, StageError::NotEnoughData) - | (StageError::Custom(_), StageError::Custom(_)) + (StageError::Eof, StageError::Eof) | + (StageError::NotEnoughData, StageError::NotEnoughData) | + (StageError::Custom(_), StageError::Custom(_)) ) } } @@ -67,8 +67,8 @@ impl PartialEq for DecodeError { fn eq(&self, other: &DecodeError) -> bool { matches!( (self, other), - (DecodeError::EmptyBuffer, DecodeError::EmptyBuffer) - | (DecodeError::AlloyRlpError(_), DecodeError::AlloyRlpError(_)) + (DecodeError::EmptyBuffer, DecodeError::EmptyBuffer) | + (DecodeError::AlloyRlpError(_), DecodeError::AlloyRlpError(_)) ) } } diff --git a/crates/derive/src/types/frame.rs b/crates/derive/src/types/frame.rs index d6ee9eed7..0e728416d 100644 --- a/crates/derive/src/types/frame.rs +++ b/crates/derive/src/types/frame.rs @@ -5,8 +5,8 @@ use alloc::vec::Vec; use anyhow::{anyhow, bail, Result}; /// Frames cannot be larger than 1MB. -/// Data transactions that carry frames are generally not larger than 128 KB due to L1 network conditions, -/// but we leave space to grow larger anyway (gas limit allows for more data). +/// Data transactions that carry frames are generally not larger than 128 KB due to L1 network +/// conditions, but we leave space to grow larger anyway (gas limit allows for more data). const MAX_FRAME_LEN: usize = 1000; /// A channel frame is a segment of a channel's data. 
@@ -50,19 +50,12 @@ impl Frame { bail!("Frame too short to decode"); } - let id = encoded[..16] - .try_into() - .map_err(|e| anyhow!("Error: {e}"))?; - let number = u16::from_be_bytes( - encoded[16..18] - .try_into() - .map_err(|e| anyhow!("Error: {e}"))?, - ); - let data_len = u32::from_be_bytes( - encoded[18..22] - .try_into() - .map_err(|e| anyhow!("Error: {e}"))?, - ) as usize; + let id = encoded[..16].try_into().map_err(|e| anyhow!("Error: {e}"))?; + let number = + u16::from_be_bytes(encoded[16..18].try_into().map_err(|e| anyhow!("Error: {e}"))?); + let data_len = + u32::from_be_bytes(encoded[18..22].try_into().map_err(|e| anyhow!("Error: {e}"))?) + as usize; if data_len > MAX_FRAME_LEN { bail!("Frame data too large"); @@ -70,20 +63,13 @@ impl Frame { let data = encoded[22..22 + data_len].to_vec(); let is_last = encoded[22 + data_len] != 0; - Ok(( - BASE_FRAME_LEN + data_len, - Self { - id, - number, - data, - is_last, - }, - )) + Ok((BASE_FRAME_LEN + data_len, Self { id, number, data, is_last })) } - /// ParseFrames parse the on chain serialization of frame(s) in an L1 transaction. Currently only version 0 of the - /// serialization format is supported. All frames must be parsed without error and there must not be any left over - /// data and there must be at least one frame. + /// ParseFrames parse the on chain serialization of frame(s) in an L1 transaction. Currently + /// only version 0 of the serialization format is supported. All frames must be parsed + /// without error and there must not be any left over data and there must be at least one + /// frame. /// /// Frames are stored in L1 transactions with the following format: /// * `data = DerivationVersion0 ++ Frame(s)` @@ -115,9 +101,9 @@ impl Frame { Ok(frames) } - /// Calculates the size of the frame + overhead for storing the frame. The sum of the frame size of each frame in - /// a channel determines the channel's size. 
The sum of the channel sizes is used for pruning & compared against - /// the max channel bank size. + /// Calculates the size of the frame + overhead for storing the frame. The sum of the frame size + /// of each frame in a channel determines the channel's size. The sum of the channel sizes + /// is used for pruning & compared against the max channel bank size. pub fn size(&self) -> usize { self.data.len() + FRAME_OVERHEAD } @@ -131,12 +117,8 @@ mod test { #[test] fn test_encode_frame_roundtrip() { - let frame = Frame { - id: [0xFF; 16], - number: 0xEE, - data: std::vec![0xDD; 50], - is_last: true, - }; + let frame = + Frame { id: [0xFF; 16], number: 0xEE, data: std::vec![0xDD; 50], is_last: true }; let (_, frame_decoded) = Frame::decode(&frame.encode()).unwrap(); assert_eq!(frame, frame_decoded); @@ -144,12 +126,8 @@ mod test { #[test] fn test_decode_many() { - let frame = Frame { - id: [0xFF; 16], - number: 0xEE, - data: std::vec![0xDD; 50], - is_last: true, - }; + let frame = + Frame { id: [0xFF; 16], number: 0xEE, data: std::vec![0xDD; 50], is_last: true }; let mut bytes = Vec::new(); bytes.extend_from_slice(&[DERIVATION_VERSION_0]); (0..5).for_each(|_| { diff --git a/crates/derive/src/types/genesis.rs b/crates/derive/src/types/genesis.rs index 671efc3c4..feff51780 100644 --- a/crates/derive/src/types/genesis.rs +++ b/crates/derive/src/types/genesis.rs @@ -13,7 +13,7 @@ pub struct Genesis { /// Timestamp of the L2 block. pub timestamp: u64, /// Initial system configuration values. - /// The L2 genesis block may not include transactions, and thus cannot encode the config values, - /// unlike later L2 blocks. + /// The L2 genesis block may not include transactions, and thus cannot encode the config + /// values, unlike later L2 blocks. 
pub system_config: SystemConfig, } diff --git a/crates/derive/src/types/rollup_config.rs b/crates/derive/src/types/rollup_config.rs index d1501b4ff..21d76335b 100644 --- a/crates/derive/src/types/rollup_config.rs +++ b/crates/derive/src/types/rollup_config.rs @@ -26,29 +26,35 @@ pub struct RollupConfig { /// The L2 chain ID pub l2_chain_id: u64, /// `regolith_time` sets the activation time of the Regolith network-upgrade: - /// a pre-mainnet Bedrock change that addresses findings of the Sherlock contest related to deposit attributes. - /// "Regolith" is the loose deposited rock that sits on top of Bedrock. - /// Active if regolith_time != None && L2 block timestamp >= Some(regolith_time), inactive otherwise. + /// a pre-mainnet Bedrock change that addresses findings of the Sherlock contest related to + /// deposit attributes. "Regolith" is the loose deposited rock that sits on top of Bedrock. + /// Active if regolith_time != None && L2 block timestamp >= Some(regolith_time), inactive + /// otherwise. #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] pub regolith_time: Option, /// `canyon_time` sets the activation time of the Canyon network upgrade. - /// Active if `canyon_time` != None && L2 block timestamp >= Some(canyon_time), inactive otherwise. + /// Active if `canyon_time` != None && L2 block timestamp >= Some(canyon_time), inactive + /// otherwise. #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] pub canyon_time: Option, /// `delta_time` sets the activation time of the Delta network upgrade. - /// Active if `delta_time` != None && L2 block timestamp >= Some(delta_time), inactive otherwise. + /// Active if `delta_time` != None && L2 block timestamp >= Some(delta_time), inactive + /// otherwise. #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] pub delta_time: Option, /// `ecotone_time` sets the activation time of the Ecotone network upgrade. 
- /// Active if `ecotone_time` != None && L2 block timestamp >= Some(ecotone_time), inactive otherwise. + /// Active if `ecotone_time` != None && L2 block timestamp >= Some(ecotone_time), inactive + /// otherwise. #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] pub ecotone_time: Option, /// `fjord_time` sets the activation time of the Fjord network upgrade. - /// Active if `fjord_time` != None && L2 block timestamp >= Some(fjord_time), inactive otherwise. + /// Active if `fjord_time` != None && L2 block timestamp >= Some(fjord_time), inactive + /// otherwise. #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] pub fjord_time: Option, - /// `interop_time` sets the activation time for an experimental feature-set, activated like a hardfork. - /// Active if `interop_time` != None && L2 block timestamp >= Some(interop_time), inactive otherwise. + /// `interop_time` sets the activation time for an experimental feature-set, activated like a + /// hardfork. Active if `interop_time` != None && L2 block timestamp >= Some(interop_time), + /// inactive otherwise. #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] pub interop_time: Option, /// `batch_inbox_address` is the L1 address that batches are sent to. @@ -59,13 +65,15 @@ pub struct RollupConfig { pub l1_system_config_address: Address, /// `protocol_versions_address` is the L1 address that the protocol versions are stored at. pub protocol_versions_address: Address, - /// `blobs_enabled_l1_timestamp` is the timestamp to start reading blobs as a batch data source. Optional. + /// `blobs_enabled_l1_timestamp` is the timestamp to start reading blobs as a batch data + /// source. Optional. #[cfg_attr( feature = "serde", serde(rename = "blobs_data", skip_serializing_if = "Option::is_none") )] pub blobs_enabled_l1_timestamp: Option, - /// `da_challenge_address` is the L1 address that the data availability challenge contract is stored at. 
+ /// `da_challenge_address` is the L1 address that the data availability challenge contract is + /// stored at. #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] pub da_challenge_address: Option
, } diff --git a/crates/derive/src/types/system_config.rs b/crates/derive/src/types/system_config.rs index 0ddd85862..cdab8fe8b 100644 --- a/crates/derive/src/types/system_config.rs +++ b/crates/derive/src/types/system_config.rs @@ -74,9 +74,9 @@ impl SystemConfig { receipt.logs.iter().try_for_each(|log| { let topics = log.topics(); - if log.address == rollup_config.l1_system_config_address - && !topics.is_empty() - && topics[0] == CONFIG_UPDATE_TOPIC + if log.address == rollup_config.l1_system_config_address && + !topics.is_empty() && + topics[0] == CONFIG_UPDATE_TOPIC { self.process_config_update_log(log, rollup_config, l1_time)?; } @@ -86,16 +86,17 @@ impl SystemConfig { Ok(()) } - /// Decodes an EVM log entry emitted by the system config contract and applies it as a [SystemConfig] change. + /// Decodes an EVM log entry emitted by the system config contract and applies it as a + /// [SystemConfig] change. /// /// Parse log data for: /// /// ```text - ///event ConfigUpdate( + /// event ConfigUpdate( /// uint256 indexed version, /// UpdateType indexed updateType, /// bytes data - ///); + /// ); /// ``` fn process_config_update_log( &mut self, @@ -293,9 +294,7 @@ mod test { }; // Update the batcher address. - system_config - .process_config_update_log(&update_log, &rollup_config, 0) - .unwrap(); + system_config.process_config_update_log(&update_log, &rollup_config, 0).unwrap(); assert_eq!( system_config.batcher_addr, @@ -324,9 +323,7 @@ mod test { }; // Update the batcher address. - system_config - .process_config_update_log(&update_log, &rollup_config, 0) - .unwrap(); + system_config.process_config_update_log(&update_log, &rollup_config, 0).unwrap(); assert_eq!(system_config.l1_fee_overhead, U256::from(0xbabe)); assert_eq!(system_config.l1_fee_scalar, U256::from(0xbeef)); @@ -353,9 +350,7 @@ mod test { }; // Update the batcher address. 
- system_config - .process_config_update_log(&update_log, &rollup_config, 10) - .unwrap(); + system_config.process_config_update_log(&update_log, &rollup_config, 10).unwrap(); assert_eq!(system_config.l1_fee_overhead, U256::from(0)); assert_eq!(system_config.l1_fee_scalar, U256::from(0xbeef)); @@ -382,9 +377,7 @@ mod test { }; // Update the batcher address. - system_config - .process_config_update_log(&update_log, &rollup_config, 0) - .unwrap(); + system_config.process_config_update_log(&update_log, &rollup_config, 0).unwrap(); assert_eq!(system_config.gas_limit, U256::from(0xbeef)); } diff --git a/crates/preimage/src/hint.rs b/crates/preimage/src/hint.rs index ff6342878..99861339a 100644 --- a/crates/preimage/src/hint.rs +++ b/crates/preimage/src/hint.rs @@ -2,7 +2,8 @@ use crate::{traits::HintWriterClient, PipeHandle}; use alloc::vec; use anyhow::Result; -/// A [HintWriter] is a high-level interface to the hint pipe. It provides a way to write hints to the host. +/// A [HintWriter] is a high-level interface to the hint pipe. It provides a way to write hints to +/// the host. #[derive(Debug, Clone, Copy)] pub struct HintWriter { pipe_handle: PipeHandle, @@ -16,11 +17,11 @@ impl HintWriter { } impl HintWriterClient for HintWriter { - /// Write a hint to the host. This will overwrite any existing hint in the pipe, and block until all data has been - /// written. + /// Write a hint to the host. This will overwrite any existing hint in the pipe, and block until + /// all data has been written. fn write(&self, hint: &str) -> Result<()> { - // Form the hint into a byte buffer. The format is a 4-byte big-endian length prefix followed by the hint - // string. + // Form the hint into a byte buffer. The format is a 4-byte big-endian length prefix + // followed by the hint string. 
let mut hint_bytes = vec![0u8; hint.len() + 4]; hint_bytes[0..4].copy_from_slice(u32::to_be_bytes(hint.len() as u32).as_ref()); hint_bytes[4..].copy_from_slice(hint.as_bytes()); diff --git a/crates/preimage/src/key.rs b/crates/preimage/src/key.rs index 28e9ed8eb..4c1e9a565 100644 --- a/crates/preimage/src/key.rs +++ b/crates/preimage/src/key.rs @@ -1,27 +1,30 @@ -//! Contains the [PreimageKey] type, which is used to identify preimages that may be fetched from the preimage oracle. +//! Contains the [PreimageKey] type, which is used to identify preimages that may be fetched from +//! the preimage oracle. /// #[derive(Debug, Default, Clone, Copy, Eq, PartialEq)] #[repr(u8)] pub enum PreimageKeyType { - /// Local key types are local to a given instance of a fault-proof and context dependent. Commonly these local keys - /// are mapped to bootstrap data for the fault proof program. + /// Local key types are local to a given instance of a fault-proof and context dependent. + /// Commonly these local keys are mapped to bootstrap data for the fault proof program. Local = 1, - /// Keccak256 key types are global and context independent. Preimages are mapped from the low-order 31 bytes of - /// the preimage's `keccak256` digest to the preimage itself. + /// Keccak256 key types are global and context independent. Preimages are mapped from the + /// low-order 31 bytes of the preimage's `keccak256` digest to the preimage itself. #[default] Keccak256 = 2, /// GlobalGeneric key types are reserved for future use. GlobalGeneric = 3, - /// Sha256 key types are global and context independent. Preimages are mapped from the low-order 31 bytes of - /// the preimage's `sha256` digest to the preimage itself. + /// Sha256 key types are global and context independent. Preimages are mapped from the + /// low-order 31 bytes of the preimage's `sha256` digest to the preimage itself. Sha256 = 4, - /// Blob key types are global and context independent. 
Blob keys are constructed as `keccak256(commitment ++ z)`, - /// and then the high-order byte of the digest is set to the type byte. + /// Blob key types are global and context independent. Blob keys are constructed as + /// `keccak256(commitment ++ z)`, and then the high-order byte of the digest is set to the + /// type byte. Blob = 5, } -/// A preimage key is a 32-byte value that identifies a preimage that may be fetched from the oracle. +/// A preimage key is a 32-byte value that identifies a preimage that may be fetched from the +/// oracle. /// /// **Layout**: /// | Bits | Description | @@ -35,23 +38,20 @@ pub struct PreimageKey { } impl PreimageKey { - /// Creates a new [PreimageKey] from a 32-byte value and a [PreimageKeyType]. The 32-byte value will be truncated - /// to 31 bytes by taking the low-order 31 bytes. + /// Creates a new [PreimageKey] from a 32-byte value and a [PreimageKeyType]. The 32-byte value + /// will be truncated to 31 bytes by taking the low-order 31 bytes. pub fn new(key: [u8; 32], key_type: PreimageKeyType) -> Self { let mut data = [0u8; 31]; data.copy_from_slice(&key[1..]); Self { data, key_type } } - /// Creates a new local [PreimageKey] from a 64-bit local identifier. The local identifier will be written into the - /// low-order 8 bytes of the big-endian 31-byte data field. + /// Creates a new local [PreimageKey] from a 64-bit local identifier. The local identifier will + /// be written into the low-order 8 bytes of the big-endian 31-byte data field. pub fn new_local(local_ident: u64) -> Self { let mut data = [0u8; 31]; data[23..].copy_from_slice(&local_ident.to_be_bytes()); - Self { - data, - key_type: PreimageKeyType::Local, - } + Self { data, key_type: PreimageKeyType::Local } } /// Returns the [PreimageKeyType] for the [PreimageKey]. 
diff --git a/crates/preimage/src/lib.rs b/crates/preimage/src/lib.rs index c7b7a6b9f..7dfaf40b6 100644 --- a/crates/preimage/src/lib.rs +++ b/crates/preimage/src/lib.rs @@ -1,10 +1,5 @@ #![doc = include_str!("../README.md")] -#![warn( - missing_debug_implementations, - missing_docs, - unreachable_pub, - rustdoc::all -)] +#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)] #![deny(unused_must_use, rust_2018_idioms)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![no_std] diff --git a/crates/preimage/src/oracle.rs b/crates/preimage/src/oracle.rs index e319825ab..2bd9710e0 100644 --- a/crates/preimage/src/oracle.rs +++ b/crates/preimage/src/oracle.rs @@ -14,8 +14,9 @@ impl OracleReader { Self { pipe_handle } } - /// Set the preimage key for the global oracle reader. This will overwrite any existing key, and block until the - /// host has prepared the preimage and responded with the length of the preimage. + /// Set the preimage key for the global oracle reader. This will overwrite any existing key, and + /// block until the host has prepared the preimage and responded with the length of the + /// preimage. fn write_key(&mut self, key: PreimageKey) -> Result { // Write the key to the host so that it can prepare the preimage. let key_bytes: [u8; 32] = key.into(); @@ -29,8 +30,8 @@ impl OracleReader { } impl PreimageOracleClient for OracleReader { - /// Get the data corresponding to the currently set key from the host. Return the data in a new heap allocated - /// `Vec` + /// Get the data corresponding to the currently set key from the host. Return the data in a new + /// heap allocated `Vec` fn get(&mut self, key: PreimageKey) -> Result> { let length = self.write_key(key)?; let mut data_buffer = alloc::vec![0; length]; @@ -41,18 +42,15 @@ impl PreimageOracleClient for OracleReader { Ok(data_buffer) } - /// Get the data corresponding to the currently set key from the host. 
Write the data into the provided buffer + /// Get the data corresponding to the currently set key from the host. Write the data into the + /// provided buffer fn get_exact(&mut self, key: PreimageKey, buf: &mut [u8]) -> Result<()> { // Write the key to the host and read the length of the preimage. let length = self.write_key(key)?; // Ensure the buffer is the correct size. if buf.len() != length { - bail!( - "Buffer size {} does not match preimage size {}", - buf.len(), - length - ); + bail!("Buffer size {} does not match preimage size {}", buf.len(), length); } self.pipe_handle.read_exact(buf)?; @@ -71,8 +69,9 @@ mod test { use std::{fs::File, os::fd::AsRawFd}; use tempfile::tempfile; - /// Test struct containing the [OracleReader] and a [PipeHandle] for the host, plus the open [File]s. The [File]s - /// are stored in this struct so that they are not dropped until the end of the test. + /// Test struct containing the [OracleReader] and a [PipeHandle] for the host, plus the open + /// [File]s. The [File]s are stored in this struct so that they are not dropped until the + /// end of the test. /// /// TODO: Swap host pipe handle to oracle writer once it exists. #[derive(Debug)] @@ -83,8 +82,8 @@ mod test { _write_file: File, } - /// Helper for creating a new [OracleReader] and [PipeHandle] for testing. The file channel is over two temporary - /// files. + /// Helper for creating a new [OracleReader] and [PipeHandle] for testing. The file channel is + /// over two temporary files. /// /// TODO: Swap host pipe handle to oracle writer once it exists. 
fn client_and_host() -> ClientAndHost { @@ -98,12 +97,7 @@ mod test { let oracle_reader = OracleReader::new(client_handle); - ClientAndHost { - oracle_reader, - host_handle, - _read_file: read_file, - _write_file: write_file, - } + ClientAndHost { oracle_reader, host_handle, _read_file: read_file, _write_file: write_file } } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -113,9 +107,7 @@ mod test { let (mut oracle_reader, host_handle) = (sys.oracle_reader, sys.host_handle); let client = tokio::task::spawn(async move { - oracle_reader - .get(PreimageKey::new([0u8; 32], PreimageKeyType::Keccak256)) - .unwrap() + oracle_reader.get(PreimageKey::new([0u8; 32], PreimageKeyType::Keccak256)).unwrap() }); let host = tokio::task::spawn(async move { let mut length_and_data: [u8; 8 + 10] = [0u8; 8 + 10]; diff --git a/crates/preimage/src/pipe.rs b/crates/preimage/src/pipe.rs index 42194a69f..e9d7bb67f 100644 --- a/crates/preimage/src/pipe.rs +++ b/crates/preimage/src/pipe.rs @@ -1,5 +1,5 @@ -//! This module contains a rudamentary pipe between two file descriptors, using [kona_common::io] for -//! reading and writing from the file descriptors. +//! This module contains a rudimentary pipe between two file descriptors, using [kona_common::io] +//! for reading and writing from the file descriptors. use anyhow::{bail, Result}; use kona_common::{io, FileDescriptor, RegisterSize}; @@ -16,10 +16,7 @@ pub struct PipeHandle { impl PipeHandle { /// Create a new [PipeHandle] from two file descriptors. pub const fn new(read_handle: FileDescriptor, write_handle: FileDescriptor) -> Self { - Self { - read_handle, - write_handle, - } + Self { read_handle, write_handle } } /// Read from the pipe into the given buffer. 
diff --git a/crates/preimage/src/traits.rs b/crates/preimage/src/traits.rs index e7676c3d6..4954701c8 100644 --- a/crates/preimage/src/traits.rs +++ b/crates/preimage/src/traits.rs @@ -2,17 +2,19 @@ use crate::PreimageKey; use alloc::vec::Vec; use anyhow::Result; -/// A [PreimageOracleClient] is a high-level interface to read data from the host, keyed by a [PreimageKey]. +/// A [PreimageOracleClient] is a high-level interface to read data from the host, keyed by a +/// [PreimageKey]. pub trait PreimageOracleClient { - /// Get the data corresponding to the currently set key from the host. Return the data in a new heap allocated - /// `Vec` + /// Get the data corresponding to the currently set key from the host. Return the data in a new + /// heap allocated `Vec` /// /// # Returns /// - `Ok(Vec)` if the data was successfully fetched from the host. /// - `Err(_)` if the data could not be fetched from the host. fn get(&mut self, key: PreimageKey) -> Result>; - /// Get the data corresponding to the currently set key from the host. Writes the data into the provided buffer. + /// Get the data corresponding to the currently set key from the host. Writes the data into the + /// provided buffer. /// /// # Returns /// - `Ok(())` if the data was successfully written into the buffer. @@ -20,10 +22,11 @@ pub trait PreimageOracleClient { fn get_exact(&mut self, key: PreimageKey, buf: &mut [u8]) -> Result<()>; } -/// A [HintWriterClient] is a high-level interface to the hint pipe. It provides a way to write hints to the host. +/// A [HintWriterClient] is a high-level interface to the hint pipe. It provides a way to write +/// hints to the host. pub trait HintWriterClient { - /// Write a hint to the host. This will overwrite any existing hint in the pipe, and block until all data has been - /// written. + /// Write a hint to the host. This will overwrite any existing hint in the pipe, and block until + /// all data has been written. 
/// /// # Returns /// - `Ok(())` if the hint was successfully written to the host. diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..68c3c9303 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,11 @@ +reorder_imports = true +imports_granularity = "Crate" +use_small_heuristics = "Max" +comment_width = 100 +wrap_comments = true +binop_separator = "Back" +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true +format_code_in_doc_comments = true +doc_comment_code_block_width = 100