feat: derivation pipeline #40


Merged
merged 24 commits, Apr 2, 2025
6 changes: 4 additions & 2 deletions .github/workflows/lint.yaml
@@ -127,10 +127,12 @@ jobs:
include:
- type: wasm
target: wasm32-unknown-unknown
exclude: scroll-engine,scroll-wire,scroll-network,rollup-node-manager,rollup-node-watcher,scroll-db,scroll-migration,rollup-node-indexer,rollup-node
exclude: |
scroll-engine,scroll-wire,rollup-node,scroll-network,rollup-node-manager,rollup-node-watcher,scroll-db,scroll-migration,rollup-node-indexer,scroll-derivation-pipeline
- type: riscv
target: riscv32imac-unknown-none-elf
exclude: scroll-engine,scroll-wire,scroll-network,rollup-node-manager,rollup-node-watcher,scroll-db,scroll-migration,rollup-node-indexer,rollup-node,scroll-codec
exclude: |
scroll-engine,scroll-wire,rollup-node,scroll-network,rollup-node-manager,rollup-node-watcher,scroll-db,scroll-migration,rollup-node-indexer,scroll-codec,scroll-derivation-pipeline
steps:
- uses: actions/checkout@v4
- uses: rui314/setup-mold@v1
15 changes: 15 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

9 changes: 6 additions & 3 deletions Cargo.toml
@@ -8,9 +8,10 @@ exclude = [".github/"]
[workspace]
members = [
"bin/rollup",
"crates/codec",
"crates/database/db",
"crates/database/migration",
"crates/codec",
"crates/derivation-pipeline",
"crates/engine",
"crates/indexer",
"crates/l1",
@@ -127,8 +128,9 @@ alloy-transport = { version = "0.12.2", default-features = false }

# scroll-alloy
scroll-alloy-consensus = { git = "https://github.com/scroll-tech/reth.git", default-features = false }
scroll-alloy-provider = { git = "https://github.com/scroll-tech/reth.git", default-features = false }
scroll-alloy-network = { git = "https://github.com/scroll-tech/reth.git", default-features = false }
scroll-alloy-provider = { git = "https://github.com/scroll-tech/reth.git", default-features = false }
scroll-alloy-rpc-types-engine = { git = "https://github.com/scroll-tech/reth.git" }

# reth
reth-eth-wire-types = { git = "https://github.com/scroll-tech/reth.git", default-features = false }
@@ -151,8 +153,9 @@ rollup-node-indexer = { path = "crates/indexer" }
rollup-node-manager = { path = "crates/node" }
rollup-node-primitives = { path = "crates/primitives" }
rollup-node-watcher = { path = "crates/watcher" }
scroll-db = { path = "crates/database/db" }
scroll-codec = { path = "crates/codec" }
scroll-db = { path = "crates/database/db" }
scroll-derivation-pipeline = { path = "crates/derivation-pipeline" }
scroll-engine = { path = "crates/engine" }
scroll-l1 = { path = "crates/l1" }
scroll-network = { path = "crates/network" }
2 changes: 1 addition & 1 deletion bin/rollup/Cargo.toml
@@ -14,7 +14,7 @@ alloy-rpc-types-engine.workspace = true

# scroll-alloy
scroll-alloy-consensus.workspace = true
scroll-alloy-rpc-types-engine = { git = "https://github.com/scroll-tech/reth.git" }
scroll-alloy-rpc-types-engine.workspace = true
scroll-alloy-provider.workspace = true

# reth
2 changes: 2 additions & 0 deletions crates/codec/Cargo.toml
@@ -5,6 +5,7 @@ edition = "2021"

[dependencies]
# alloy
scroll-alloy-consensus.workspace = true
alloy-eips.workspace = true
alloy-primitives.workspace = true
alloy-rlp = { version = "0.3", default-features = false }
@@ -21,6 +22,7 @@ zstd = "0.13"

[dev-dependencies]
eyre.workspace = true
serde_json = "1.0"

[features]
test-utils = ["dep:eyre", "scroll-l1/test-utils"]
21 changes: 21 additions & 0 deletions crates/codec/src/block.rs
@@ -31,6 +31,27 @@ pub struct BlockContext {
pub base_fee: U256,
/// The block gas limit.
pub gas_limit: u64,
/// The block's total transaction count.
pub num_transactions: u16,
/// The block's L1 message count.
pub num_l1_messages: u16,
}

impl BlockContext {
/// The length in bytes of the serialized [`BlockContext`].
pub const BYTES_LENGTH: usize = 60;

/// Returns an owned array which contains all fields of the [`BlockContext`].
pub fn to_be_bytes(&self) -> [u8; Self::BYTES_LENGTH] {
let mut buf = [0u8; Self::BYTES_LENGTH];

buf[..8].copy_from_slice(&self.number.to_be_bytes());
buf[8..16].copy_from_slice(&self.timestamp.to_be_bytes());
if self.base_fee != U256::ZERO {
buf[16..48].copy_from_slice(&self.base_fee.to_be_bytes::<32>());
}
buf[48..56].copy_from_slice(&self.gas_limit.to_be_bytes());
buf[56..58].copy_from_slice(&self.num_transactions.to_be_bytes());
buf[58..].copy_from_slice(&self.num_l1_messages.to_be_bytes());
buf
}
}
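
For reference, a minimal sketch of the inverse mapping of the fixed 60-byte layout above. This decoding helper is illustrative, not part of this PR, and assumes `number` and `timestamp` are `u64` fields (consistent with the 8-byte big-endian writes in `to_be_bytes`) and that it sits alongside `BlockContext` in `crates/codec/src/block.rs`:

```rust
use alloy_primitives::U256;

// Layout: number (8) | timestamp (8) | base_fee (32) | gas_limit (8) |
// num_transactions (2) | num_l1_messages (2) = 60 bytes, all big-endian.
// Hypothetical decoding helper, shown only to document the layout.
fn block_context_from_be_bytes(buf: &[u8; BlockContext::BYTES_LENGTH]) -> BlockContext {
    BlockContext {
        number: u64::from_be_bytes(buf[..8].try_into().unwrap()),
        timestamp: u64::from_be_bytes(buf[8..16].try_into().unwrap()),
        base_fee: U256::from_be_slice(&buf[16..48]),
        gas_limit: u64::from_be_bytes(buf[48..56].try_into().unwrap()),
        num_transactions: u16::from_be_bytes(buf[56..58].try_into().unwrap()),
        num_l1_messages: u16::from_be_bytes(buf[58..].try_into().unwrap()),
    }
}
```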
202 changes: 202 additions & 0 deletions crates/codec/src/decoding/batch.rs
@@ -0,0 +1,202 @@
use crate::{
decoding::{constants::KECCAK_256_DIGEST_BYTES_SIZE, payload::PayloadData},
BlockContext, L2Block,
};

use alloy_primitives::{bytes::BufMut, keccak256, B256};
use scroll_alloy_consensus::TxL1Message;

/// The deserialized batch data.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Batch {
/// The batch version.
pub version: u8,
/// The number of blocks in each chunk of the batch. Only relevant for codec versions v0
/// through v6.
pub chunks_block_count: Option<Vec<usize>>,
/// The data for the batch.
pub data: PayloadData,
}

impl Batch {
/// Returns a new instance of a batch.
pub fn new(version: u8, chunks_block_count: Option<Vec<usize>>, data: PayloadData) -> Self {
Self { version, chunks_block_count, data }
}

/// Computes the data hash for the batch, using the provided L1 messages associated with each
/// block.
pub fn try_compute_data_hash(&self, l1_messages: &[TxL1Message]) -> Option<B256> {
// From version 7 and above, the batch doesn't have a data hash.
if self.version >= 7 {
return None;
}

let total_l1_messages: usize =
self.data.l2_blocks().iter().map(|b| b.context.num_l1_messages as usize).sum();
debug_assert_eq!(total_l1_messages, l1_messages.len(), "invalid l1 messages count");

let chunks_count = self.chunks_block_count.as_ref()?;
let blocks_buf = &mut (&**self.data.l2_blocks());
let l1_messages_buf = &mut (&*l1_messages);

let mut chunk_hashes =
Vec::with_capacity(chunks_count.len() * KECCAK_256_DIGEST_BYTES_SIZE);

for chunk_count in chunks_count {
// slice the blocks at chunk_count.
let blocks = blocks_buf.get(..*chunk_count)?;

// take the correct amount of l1 messages for each block and advance the buffer.
let l1_messages_per_block = blocks
.iter()
.map(|b| {
let num_l1_messages = b.context.num_l1_messages as usize;
let block_messages = l1_messages_buf.get(..num_l1_messages).unwrap_or(&[]);
*l1_messages_buf = l1_messages_buf.get(num_l1_messages..).unwrap_or(&[]);
Comment on lines +55 to +56

Collaborator: does the l1_messages_buf type have a method such as split? that would be more efficient?

Collaborator Author (@greged93, Apr 1, 2025): I'm not 100% sure how you would use split, would you mind elaborating? The slice type has a split iterator which you can get by calling split.

Collaborator: I think split_at or split_at_mut would be appropriate and a little more ergonomic, but this isn't a big deal.

Collaborator Author (@greged93, Apr 2, 2025): the only issue with these methods is that they can panic.
block_messages
})
.collect::<Vec<_>>();

// compute the chunk data hash.
chunk_hashes
.append(&mut hash_chunk(self.version, blocks, l1_messages_per_block).to_vec());

// advance the buffer.
*blocks_buf = blocks_buf.get(*chunk_count..).unwrap_or(&[]);
}

Some(keccak256(chunk_hashes))
}
}

/// Compute the hash for the chunk.
fn hash_chunk(
version: u8,
l2_blocks: &[L2Block],
l1_messages_per_block: Vec<&[TxL1Message]>,
) -> B256 {
// reserve the correct capacity.
let l1_messages_count: usize =
l1_messages_per_block.iter().map(|messages| messages.len()).sum();
let mut capacity = l2_blocks.len() * (BlockContext::BYTES_LENGTH - 2) +
l1_messages_count * KECCAK_256_DIGEST_BYTES_SIZE;
if version == 0 {
capacity += l2_blocks.iter().map(|b| b.transactions.len()).sum::<usize>();
}
let mut buf = Vec::with_capacity(capacity);

for block in l2_blocks {
let context = block.context.to_be_bytes();
// we don't use the last 2 bytes.
// <https://github.com/scroll-tech/da-codec/blob/main/encoding/codecv0_types.go#L175>
buf.put_slice(&context[..BlockContext::BYTES_LENGTH - 2]);
}

for (block, l1_messages) in l2_blocks.iter().zip(l1_messages_per_block) {
for l1_message in l1_messages {
buf.put_slice(l1_message.tx_hash().as_slice())
}

// for v0, we add the l2 transaction hashes.
if version == 0 {
for tx in &block.transactions {
buf.put_slice(keccak256(&tx.0).as_slice());
}
}
}

keccak256(buf)
}
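
Regarding the `split` discussion in the review thread above: a minimal sketch of a non-panicking alternative using `slice::split_at_checked` (stable since Rust 1.80), which returns `None` instead of panicking and so addresses the concern raised about `split_at`. The helper name is illustrative, not part of this PR:

```rust
use scroll_alloy_consensus::TxL1Message;

/// Takes the first `n` L1 messages and advances the buffer, returning `None`
/// (rather than panicking) if fewer than `n` messages remain.
fn take_l1_messages<'a>(buf: &mut &'a [TxL1Message], n: usize) -> Option<&'a [TxL1Message]> {
    let (head, tail) = buf.split_at_checked(n)?;
    *buf = tail;
    Some(head)
}

// e.g. at the call site in `try_compute_data_hash`:
// let block_messages = take_l1_messages(l1_messages_buf, num_l1_messages)?;
```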

#[cfg(test)]
mod tests {
use crate::decoding::{test_utils::read_to_bytes, v0::decode_v0, v1::decode_v1};

use alloy_primitives::{address, b256, bytes, U256};
use scroll_alloy_consensus::TxL1Message;

#[test]
fn test_should_compute_data_hash_v0() -> eyre::Result<()> {
// <https://etherscan.io/tx/0x2c7bb77d6086befd9bdcf936479fd246d1065cbd2c6aff55b1d39a67aff965c1>
let raw_calldata = read_to_bytes("./testdata/calldata_v0.bin")?;
let batch = decode_v0(&raw_calldata)?;

let hash = batch.try_compute_data_hash(&[]).unwrap();

assert_eq!(hash, b256!("33e608dbf683c1ee03a34d01de52f67d60a0563b7e713b65a7395bb3b646f71f"));

Ok(())
}

#[test]
fn test_should_compute_data_hash_v0_with_l1_messages() -> eyre::Result<()> {
// <https://etherscan.io/tx/0xdc0a315b25b46f4c1085e3884c63f8ede61e984e47655f7667e5f14e3df55f82>
let raw_calldata = read_to_bytes("./testdata/calldata_v0_with_l1_messages.bin")?;
let batch = decode_v0(&raw_calldata)?;

let hash = batch
.try_compute_data_hash(&[
TxL1Message {
queue_index: 39,
gas_limit: 180000,
to: address!("781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC"),
value: U256::ZERO,
sender: address!("7885BcBd5CeCEf1336b5300fb5186A12DDD8c478"),
input: bytes!("8ef1332e000000000000000000000000f1af3b23de0a5ca3cab7261cb0061c0d779a5c7b00000000000000000000000033b60d5dd260d453cac3782b0bdc01ce846721420000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e48431f5c1000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4800000000000000000000000006efdbff2a14a7c8e15944d1f4a48f9f95f663a4000000000000000000000000c451b0191351ce308fdfd779d73814c910fc5ecb000000000000000000000000c451b0191351ce308fdfd779d73814c910fc5ecb00000000000000000000000000000000000000000000000000000005d21dba0000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
},
TxL1Message {
queue_index: 40,
gas_limit: 168000,
to: address!("781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC"),
value: U256::ZERO,
sender: address!("7885BcBd5CeCEf1336b5300fb5186A12DDD8c478"),
input: bytes!("8ef1332e0000000000000000000000007f2b8c31f88b6006c382775eea88297ec1e3e9050000000000000000000000006ea73e05adc79974b931123675ea8f78ffdacdf00000000000000000000000000000000000000000000000000011c37937e08000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a4232e8748000000000000000000000000b89db2813541287a4dd1fc6801eec30595ecdc6c000000000000000000000000b89db2813541287a4dd1fc6801eec30595ecdc6c0000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
},
TxL1Message {
queue_index: 41,
gas_limit: 168000,
to: address!("781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC"),
value: U256::ZERO,
sender: address!("7885BcBd5CeCEf1336b5300fb5186A12DDD8c478"),
input: bytes!("8ef1332e0000000000000000000000007f2b8c31f88b6006c382775eea88297ec1e3e9050000000000000000000000006ea73e05adc79974b931123675ea8f78ffdacdf0000000000000000000000000000000000000000000000000002386f26fc10000000000000000000000000000000000000000000000000000000000000000002900000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a4232e87480000000000000000000000003219c394111d45757ccb68a4fd353b4f7f9660960000000000000000000000003219c394111d45757ccb68a4fd353b4f7f966096000000000000000000000000000000000000000000000000002386f26fc100000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
},
])
.unwrap();

assert_eq!(hash, b256!("55fd647c58461d910b5bfb4539f2177ba575c9c8d578a344558976a4375cc287"));

Ok(())
}

#[test]
fn test_should_compute_data_hash_v1() -> eyre::Result<()> {
// <https://etherscan.io/tx/0x27d73eef6f0de411f8db966f0def9f28c312a0ae5cfb1ac09ec23f8fa18b005b>
let raw_calldata = read_to_bytes("./testdata/calldata_v1.bin")?;
let blob = read_to_bytes("./testdata/blob_v1.bin")?;
let batch = decode_v1(&raw_calldata, &blob)?;

let hash = batch.try_compute_data_hash(&[]).unwrap();

assert_eq!(hash, b256!("c20f5914a772663080f8a77955b33814a04f7a19c880536e562a1bcfd5343a37"));

Ok(())
}

#[test]
fn test_should_compute_data_hash_v1_with_l1_messages() -> eyre::Result<()> {
// <https://etherscan.io/tx/0x30451fc1a7ad4a87f9a2616e972d2489326bafa2a41aba8cfb664aec5f727d94>
let raw_calldata = read_to_bytes("./testdata/calldata_v1_with_l1_messages.bin")?;
let raw_blob = read_to_bytes("./testdata/blob_v1_with_l1_messages.bin")?;
let batch = decode_v1(&raw_calldata, &raw_blob)?;

let l1_messages: Vec<TxL1Message> =
serde_json::from_str(&std::fs::read_to_string("./testdata/l1_messages_v1.json")?)?;

let hash = batch.try_compute_data_hash(&l1_messages).unwrap();

assert_eq!(hash, b256!("e20ac534891e7f96c3a945e2aafe0a05c7079959eccd94ad217ee0f3b29ac030"));

Ok(())
}
}
2 changes: 1 addition & 1 deletion crates/codec/src/decoding/blob.rs
@@ -37,7 +37,7 @@ mod tests {

#[test]
fn test_should_skip_unused_blob_bytes() -> eyre::Result<()> {
let blob = read_to_bytes("./src/testdata/blob_v1.bin")?;
let blob = read_to_bytes("./testdata/blob_v1.bin")?;
let iterator = BlobSliceIter::from_blob_slice(&blob);

let val = iterator.take(256).copied().collect::<Vec<_>>();
5 changes: 5 additions & 0 deletions crates/codec/src/decoding/constants.rs
@@ -0,0 +1,5 @@
/// The length in bytes of the Keccak 256 hash digest.
pub const KECCAK_256_DIGEST_BYTES_SIZE: usize = 32;

/// The length in bytes of each item in the skipped L1 messages bitmap.
pub const SKIPPED_L1_MESSAGE_BITMAP_ITEM_BYTES_SIZE: usize = 32;
13 changes: 12 additions & 1 deletion crates/codec/src/decoding/macros.rs
@@ -8,12 +8,23 @@ macro_rules! from_be_bytes_slice_and_advance_buf {
let mut arr = [0u8; ::std::mem::size_of::<$ty>()];
let size = $size;
let size_of = ::std::mem::size_of::<$ty>();
arr[size_of - size..].copy_from_slice(&$slice[0..size]);
arr[size_of - size..].copy_from_slice(&$slice[..size]);
::alloy_primitives::bytes::Buf::advance($slice, size);
<$ty>::from_be_bytes(arr)
}};
}

/// Calls `from_slice` on the provided type using the passed-in buffer and advances it.
#[macro_export]
macro_rules! from_slice_and_advance_buf {
($ty:ty, $slice: expr) => {{
let size_of = ::std::mem::size_of::<$ty>();
let t = <$ty>::from_slice(&$slice[..size_of]);
::alloy_primitives::bytes::Buf::advance($slice, size_of);
t
}};
}
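
A usage sketch for the new macro, assuming it is invoked from inside the `scroll-codec` crate (where `#[macro_export]` makes it reachable at the crate root):

```rust
use alloy_primitives::B256;

#[test]
fn advances_buffer_by_type_size() {
    let data = [0xaau8; 64];
    let buf = &mut &data[..];

    // Reads size_of::<B256>() = 32 bytes into a B256 and advances the buffer.
    let digest = crate::from_slice_and_advance_buf!(B256, buf);

    assert_eq!(digest, B256::repeat_byte(0xaa));
    assert_eq!(buf.len(), 32);
}
```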

/// Checks that the input buffer has the required length. Returns an Eof error otherwise.
#[macro_export]
macro_rules! check_buf_len {